Merge branch 'work.lookup' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 14 Aug 2018 03:54:14 +0000 (20:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 14 Aug 2018 03:54:14 +0000 (20:54 -0700)
Pull vfs lookup() updates from Al Viro:
 "More conversions of ->lookup() to d_splice_alias().

  Should be reasonably complete now - the only leftovers are in ceph"

* 'work.lookup' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  afs_try_auto_mntpt(): return NULL instead of ERR_PTR(-ENOENT)
  afs_lookup(): switch to d_splice_alias()
  afs: switch dynroot lookups to d_splice_alias()
  hpfs: fix an inode leak in lookup, switch to d_splice_alias()
  hostfs_lookup: switch to d_splice_alias()
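
The conversions above all follow the same pattern. A minimal sketch of what
a converted ->lookup() ends up looking like (the foo_* names below are
hypothetical illustrations, not code from any commit in this merge):

    #include <linux/fs.h>
    #include <linux/dcache.h>

    /*
     * Hypothetical helpers for illustration: foo_find() returns the
     * inode number for a name, or 0 if absent; foo_iget() returns the
     * in-core inode or an ERR_PTR on failure.
     */
    static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
                                     unsigned int flags)
    {
            struct inode *inode = NULL;
            ino_t ino = foo_find(dir, &dentry->d_name);

            if (ino)
                    inode = foo_iget(dir->i_sb, ino);

            /*
             * d_splice_alias() consumes the inode reference and copes
             * with all the cases ->lookup() used to open-code around
             * d_add(): a NULL inode (instantiates a negative dentry),
             * an ERR_PTR (propagated to the caller), and a directory
             * inode that already has an alias (the existing dentry is
             * returned instead of the one passed in).
             */
            return d_splice_alias(inode, dentry);
    }

The point of the conversion is that the NULL/error/existing-alias handling
lives in one place, and a directory inode that already has a dentry (which
can happen on filesystems exportable via NFS) is spliced correctly rather
than acquiring a second alias.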

2511 files changed:
Documentation/RCU/Design/Data-Structures/Data-Structures.html
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
Documentation/RCU/stallwarn.txt
Documentation/RCU/whatisRCU.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/core-api/atomic_ops.rst
Documentation/core-api/kernel-api.rst
Documentation/device-mapper/writecache.txt
Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/hideep.txt
Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
Documentation/devicetree/bindings/mips/brcm/soc.txt
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
Documentation/devicetree/bindings/power/power_domain.txt
Documentation/devicetree/bindings/regulator/tps65090.txt
Documentation/devicetree/bindings/reset/st,sti-softreset.txt
Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
Documentation/devicetree/bindings/sound/qcom,apq8096.txt
Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
Documentation/devicetree/bindings/w1/w1-gpio.txt
Documentation/driver-api/infrastructure.rst
Documentation/features/sched/membarrier-sync-core/arch-support.txt
Documentation/filesystems/Locking
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/CHANGES
Documentation/filesystems/cifs/TODO
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/hwmon/max34440
Documentation/hwmon/mlxreg-fan [new file with mode: 0644]
Documentation/hwmon/npcm750-pwm-fan [new file with mode: 0644]
Documentation/hwmon/sysfs-interface
Documentation/kbuild/kbuild.txt
Documentation/kbuild/kconfig-language.txt
Documentation/kbuild/kconfig.txt
Documentation/kprobes.txt
Documentation/memory-barriers.txt
Documentation/networking/bonding.txt
Documentation/networking/dpaa2/overview.rst
Documentation/networking/e100.rst
Documentation/networking/e1000.rst
Documentation/networking/strparser.txt
Documentation/trace/histogram.txt
Documentation/translations/ko_KR/memory-barriers.txt
Documentation/usb/gadget_configfs.txt
Documentation/virtual/kvm/api.txt
Documentation/x86/intel_rdt_ui.txt
Documentation/x86/x86_64/boot-options.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/atomic.h
arch/alpha/kernel/osf_sys.c
arch/alpha/lib/Makefile
arch/alpha/lib/dec_and_lock.c [deleted file]
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/delay.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/entry.h
arch/arc/include/asm/kprobes.h
arch/arc/include/asm/mach_desc.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/kernel/irq.c
arch/arc/kernel/kprobes.c
arch/arc/kernel/process.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arc/plat-eznps/include/plat/ctop.h
arch/arc/plat-eznps/mtm.c
arch/arc/plat-hsdk/Kconfig
arch/arc/plat-hsdk/platform.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm-hr2.dtsi
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx51-zii-rdu1.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/socfpga_arria10.dtsi
arch/arm/common/Makefile
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/speck-neon-core.S
arch/arm/firmware/Makefile
arch/arm/include/asm/assembler.h
arch/arm/include/asm/atomic.h
arch/arm/include/asm/bitops.h
arch/arm/include/asm/efi.h
arch/arm/include/asm/hw_breakpoint.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/kprobes.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mach/time.h
arch/arm/include/asm/probes.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/irq.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/sys_oabi-compat.c
arch/arm/kernel/time.c
arch/arm/lib/copy_from_user.S
arch/arm/mach-bcm/Kconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-pxa/irq.c
arch/arm/mach-rpc/ecard.c
arch/arm/mach-socfpga/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/init.c
arch/arm/mm/nommu.c
arch/arm/mm/tcm.h
arch/arm/net/bpf_jit_32.c
arch/arm/plat-omap/counter_32k.c
arch/arm/probes/kprobes/core.c
arch/arm/probes/kprobes/test-core.c
arch/arm/vfp/Makefile
arch/arm/vfp/vfpmodule.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/marvell/armada-cp110.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-ce-ccm-core.S
arch/arm64/crypto/aes-glue.c
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/bitops.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/kprobes.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/simd.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/tlb.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/irq.c
arch/arm64/kernel/module.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/fpsimd.c
arch/arm64/lib/Makefile
arch/arm64/lib/bitops.S [deleted file]
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/h8300/include/asm/atomic.h
arch/hexagon/include/asm/atomic.h
arch/ia64/include/asm/atomic.h
arch/ia64/include/asm/kprobes.h
arch/ia64/include/asm/tlb.h
arch/ia64/include/uapi/asm/break.h
arch/ia64/kernel/Makefile
arch/ia64/kernel/jprobes.S [deleted file]
arch/ia64/kernel/kprobes.c
arch/ia64/kernel/perfmon.c
arch/ia64/mm/init.c
arch/m68k/Kconfig
arch/m68k/apollo/config.c
arch/m68k/atari/config.c
arch/m68k/atari/time.c
arch/m68k/bvme6000/config.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/atomic.h
arch/m68k/include/asm/bitops.h
arch/m68k/include/asm/dma-mapping.h [deleted file]
arch/m68k/include/asm/io.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/io_no.h
arch/m68k/include/asm/kmap.h
arch/m68k/include/asm/machdep.h
arch/m68k/include/asm/macintosh.h
arch/m68k/include/asm/mcf_pgalloc.h
arch/m68k/include/asm/page_no.h
arch/m68k/kernel/dma.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/setup_no.c
arch/m68k/mac/config.c
arch/m68k/mac/misc.c
arch/m68k/mm/init.c
arch/m68k/mm/mcfmmu.c
arch/m68k/mm/motorola.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/q40/config.c
arch/m68k/sun3/config.c
arch/microblaze/Kconfig.debug
arch/microblaze/include/asm/setup.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/heartbeat.c [deleted file]
arch/microblaze/kernel/platform.c [deleted file]
arch/microblaze/kernel/reset.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/board-gpr.c
arch/mips/alchemy/board-mtx1.c
arch/mips/alchemy/board-xxs1500.c
arch/mips/alchemy/devboards/platform.c
arch/mips/ar7/clock.c
arch/mips/ar7/prom.c
arch/mips/ath25/Kconfig
arch/mips/ath25/board.c
arch/mips/ath25/early_printk.c
arch/mips/ath79/clock.c
arch/mips/ath79/common.c
arch/mips/ath79/early_printk.c
arch/mips/ath79/mach-pb44.c
arch/mips/ath79/setup.c
arch/mips/bcm63xx/early_printk.c
arch/mips/bmips/dma.c
arch/mips/bmips/setup.c
arch/mips/boot/Makefile
arch/mips/boot/compressed/uart-prom.c
arch/mips/boot/dts/ingenic/jz4780.dtsi
arch/mips/boot/dts/mscc/Makefile
arch/mips/boot/dts/mscc/ocelot.dtsi
arch/mips/boot/dts/mscc/ocelot_pcb123.dts
arch/mips/boot/dts/qca/ar9132.dtsi
arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
arch/mips/boot/dts/qca/ar9331.dtsi
arch/mips/boot/dts/qca/ar9331_dpt_module.dts
arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
arch/mips/boot/dts/qca/ar9331_omega.dts
arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
arch/mips/boot/ecoff.h
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/cavium-octeon/setup.c
arch/mips/configs/ci20_defconfig
arch/mips/configs/generic_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig
arch/mips/configs/malta_kvm_guest_defconfig
arch/mips/configs/malta_qemu_32r6_defconfig
arch/mips/configs/maltaaprp_defconfig
arch/mips/configs/maltasmvp_defconfig
arch/mips/configs/maltasmvp_eva_defconfig
arch/mips/configs/maltaup_defconfig
arch/mips/configs/maltaup_xpa_defconfig
arch/mips/fw/arc/arc_con.c
arch/mips/fw/arc/promlib.c
arch/mips/fw/sni/sniprom.c
arch/mips/generic/Kconfig
arch/mips/generic/Platform
arch/mips/generic/board-ocelot_pcb123.its.S [new file with mode: 0644]
arch/mips/generic/init.c
arch/mips/generic/yamon-dt.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bmips.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/dma-coherence.h
arch/mips/include/asm/dma-direct.h
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/io.h
arch/mips/include/asm/kprobes.h
arch/mips/include/asm/mach-ar7/spaces.h
arch/mips/include/asm/mach-ath25/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-ath79/ar71xx_regs.h
arch/mips/include/asm/mach-ath79/ath79.h
arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
arch/mips/include/asm/mach-bmips/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-generic/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-generic/kmalloc.h
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mach-ip27/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-ip32/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-jazz/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-loongson64/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
arch/mips/include/asm/mach-pic32/spaces.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/netlogic/xlr/fmn.h
arch/mips/include/asm/octeon/cvmx-asxx-defs.h
arch/mips/include/asm/octeon/cvmx-ciu-defs.h
arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
arch/mips/include/asm/octeon/cvmx-spxx-defs.h
arch/mips/include/asm/octeon/cvmx-stxx-defs.h
arch/mips/include/asm/octeon/octeon.h
arch/mips/include/asm/octeon/pci-octeon.h
arch/mips/include/asm/page.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/setup.h
arch/mips/include/asm/sgialib.h
arch/mips/include/asm/sim.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/txx9/generic.h
arch/mips/include/asm/txx9/tx4939.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jazz/jazzdma.c
arch/mips/jazz/setup.c
arch/mips/jz4740/Platform
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/early_printk.c
arch/mips/kernel/early_printk_8250.c
arch/mips/kernel/entry.S
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c
arch/mips/kernel/kprobes.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mcount.S
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/relocate_kernel.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/signal_n32.c
arch/mips/kernel/signal_o32.c
arch/mips/kernel/traps.c
arch/mips/kvm/mips.c
arch/mips/lantiq/early_printk.c
arch/mips/lantiq/prom.c
arch/mips/lantiq/xway/dma.c
arch/mips/lasat/prom.c
arch/mips/lib/memset.S
arch/mips/loongson32/Platform
arch/mips/loongson64/Kconfig
arch/mips/loongson64/common/Makefile
arch/mips/loongson64/common/cs5536/cs5536_ohci.c
arch/mips/loongson64/common/dma-swiotlb.c [deleted file]
arch/mips/loongson64/common/dma.c [new file with mode: 0644]
arch/mips/loongson64/common/early_printk.c
arch/mips/loongson64/common/env.c
arch/mips/loongson64/loongson-3/Makefile
arch/mips/loongson64/loongson-3/dma.c [new file with mode: 0644]
arch/mips/loongson64/loongson-3/smp.c
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/dma-default.c [deleted file]
arch/mips/mm/dma-noncoherent.c [new file with mode: 0644]
arch/mips/mm/ioremap.c
arch/mips/mm/page.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mti-malta/Makefile
arch/mips/mti-malta/malta-pm.c [deleted file]
arch/mips/mti-malta/malta-reset.c [deleted file]
arch/mips/mti-malta/malta-setup.c
arch/mips/netlogic/common/earlycons.c
arch/mips/netlogic/xlp/dt.c
arch/mips/paravirt/serial.c
arch/mips/pci/ops-bridge.c
arch/mips/pci/pci-ar2315.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pci.c
arch/mips/pci/pcie-octeon.c
arch/mips/pic32/pic32mzda/early_console.c
arch/mips/ralink/early_printk.c
arch/mips/sgi-ip27/ip27-console.c
arch/mips/sgi-ip32/Makefile
arch/mips/sgi-ip32/ip32-dma.c [new file with mode: 0644]
arch/mips/sibyte/Kconfig
arch/mips/sibyte/common/cfe.c
arch/mips/txx9/generic/setup.c
arch/mips/vdso/Makefile
arch/mips/vdso/genvdso.h
arch/mips/vr41xx/common/pmu.c
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/cacheflush.h
arch/nds32/include/asm/futex.h
arch/nds32/kernel/setup.c
arch/nds32/mm/cacheflush.c
arch/openrisc/Kconfig
arch/openrisc/include/asm/atomic.h
arch/openrisc/include/asm/cmpxchg.h
arch/openrisc/include/asm/irq.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/kernel/irq.c
arch/openrisc/kernel/traps.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/barrier.h [new file with mode: 0644]
arch/parisc/include/asm/dma-mapping.h
arch/parisc/include/asm/linkage.h
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/signal.h
arch/parisc/include/asm/spinlock.h
arch/parisc/include/asm/unwind.h
arch/parisc/include/uapi/asm/errno.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/pci-dma.c
arch/parisc/kernel/process.c
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/real2.S
arch/parisc/kernel/setup.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/traps.c
arch/parisc/kernel/unwind.c
arch/parisc/lib/lusercopy.S
arch/parisc/mm/init.c
arch/powerpc/Makefile
arch/powerpc/include/asm/atomic.h
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable-4k.h
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/hw_breakpoint.h
arch/powerpc/include/asm/kprobes.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/nmi.h
arch/powerpc/include/asm/nohash/32/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/kprobes-ftrace.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mmu_context_iommu.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/subpage-prot.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/powermac/time.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/xmon/xmon.c
arch/riscv/Kconfig
arch/riscv/include/asm/atomic.h
arch/riscv/include/uapi/asm/elf.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/module.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/boot/Makefile
arch/s390/boot/als.c [moved from arch/s390/kernel/als.c with 83% similarity]
arch/s390/boot/compressed/.gitignore
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/head.S
arch/s390/boot/compressed/misc.c
arch/s390/boot/compressed/vmlinux.lds.S
arch/s390/boot/compressed/vmlinux.scr.lds.S [moved from arch/s390/boot/compressed/vmlinux.scr with 70% similarity]
arch/s390/boot/ebcdic.c [new file with mode: 0644]
arch/s390/boot/head.S [moved from arch/s390/kernel/head.S with 98% similarity]
arch/s390/boot/head_kdump.S [moved from arch/s390/kernel/head_kdump.S with 100% similarity]
arch/s390/boot/mem.S [new file with mode: 0644]
arch/s390/boot/sclp_early_core.c [new file with mode: 0644]
arch/s390/hypfs/hypfs_diag.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/ap.h
arch/s390/include/asm/atomic.h
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/css_chars.h
arch/s390/include/asm/gmap.h
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/nospec-insn.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/purgatory.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/setup.h
arch/s390/include/uapi/asm/chsc.h
arch/s390/kernel/Makefile
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/head64.S
arch/s390/kernel/kprobes.c
arch/s390/kernel/nospec-branch.c
arch/s390/kernel/nospec-sysfs.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/perf_regs.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kernel/sysinfo.c
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/lib/mem.S
arch/s390/mm/cmm.c
arch/s390/mm/extmem.c
arch/s390/mm/fault.c
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/page-states.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/s390/numa/numa.c
arch/s390/pci/pci_debug.c
arch/s390/purgatory/Makefile
arch/s390/purgatory/head.S
arch/s390/purgatory/purgatory.c
arch/s390/scripts/Makefile.chkbss
arch/s390/tools/gen_opcode_table.c
arch/sh/include/asm/atomic.h
arch/sh/include/asm/cmpxchg-xchg.h
arch/sh/include/asm/hw_breakpoint.h
arch/sh/include/asm/kprobes.h
arch/sh/kernel/hw_breakpoint.c
arch/sh/kernel/kprobes.c
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/kprobes.h
arch/sparc/include/asm/msi.h [deleted file]
arch/sparc/kernel/kprobes.c
arch/sparc/kernel/time_64.c
arch/sparc/lib/atomic32.c
arch/sparc/mm/srmmu.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/bitops.h
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/compressed/kaslr.c
arch/x86/boot/compressed/pgtable_64.c
arch/x86/boot/string.c
arch/x86/crypto/aegis128-aesni-asm.S
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-asm.S
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-asm.S
arch/x86/crypto/aegis256-aesni-glue.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/aesni-intel_avx-x86_64.S
arch/x86/crypto/morus1280-avx2-asm.S
arch/x86/crypto/morus1280-avx2-glue.c
arch/x86/crypto/morus1280-sse2-asm.S
arch/x86/crypto/morus1280-sse2-glue.c
arch/x86/crypto/morus640-sse2-asm.S
arch/x86/crypto/morus640-sse2-glue.c
arch/x86/crypto/sha1_ssse3_asm.S
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/Makefile
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/perf_event.h
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/apm.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/hw_breakpoint.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/intel-mid.h
arch/x86/include/asm/intel_ds.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_guest.h [deleted file]
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/orc_types.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-2level_types.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable-3level_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor-flags.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/pti.h
arch/x86/include/asm/qspinlock_paravirt.h
arch/x86/include/asm/refcount.h
arch/x86/include/asm/sections.h
arch/x86/include/asm/set_memory.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/text-patching.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/trace/hyperv.h
arch/x86/include/asm/tsc.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/unwind_hints.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/Makefile
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_rdt.c
arch/x86/kernel/cpu/intel_rdt.h
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/e820.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/irqflags.S [new file with mode: 0644]
arch/x86/kernel/jump_label.c
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/ftrace.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/ldt.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/pci-iommu_table.c
arch/x86/kernel/pcspeaker.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_msr.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kernel/x86_init.c
arch/x86/kvm/Kconfig
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/memcpy_64.S
arch/x86/mm/dump_pagetables.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/mm/pti.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-mid/Makefile
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-mid/intel_mid_weak_decls.h [deleted file]
arch/x86/platform/intel-mid/mfld.c [deleted file]
arch/x86/platform/intel-mid/mrfld.c [deleted file]
arch/x86/platform/olpc/olpc.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/power/hibernate_asm_64.S
arch/x86/purgatory/Makefile
arch/x86/tools/relocs.c
arch/x86/um/mem_32.c
arch/x86/um/vdso/.gitignore
arch/x86/um/vdso/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/irq.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/smp_pv.c
arch/x86/xen/suspend_pv.c
arch/x86/xen/time.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/atomic.h
arch/xtensa/include/asm/hw_breakpoint.h
arch/xtensa/kernel/hw_breakpoint.c
block/bio.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-softirq.c
block/blk-timeout.c
block/bsg.c
block/sed-opal.c
certs/blacklist.h
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/morus640.c
crypto/sha3_generic.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/uterror.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/osl.c
drivers/acpi/pptt.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci_mvebu.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/sata_fsl.c
drivers/ata/sata_nv.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/base/Makefile
drivers/base/core.c
drivers/base/dd.c
drivers/base/power/domain.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/hci_nokia.c
drivers/bus/ti-sysc.c
drivers/char/agp/alpha-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/hw_random/core.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/mem.c
drivers/char/random.c
drivers/clk/Makefile
drivers/clk/clk-aspeed.c
drivers/clk/clk.c
drivers/clk/clkdev.c
drivers/clk/davinci/da8xx-cfgchip.c
drivers/clk/davinci/psc.h
drivers/clk/meson/clk-audio-divider.c
drivers/clk/meson/gxbb.c
drivers/clk/mvebu/armada-37xx-periph.c
drivers/clk/qcom/gcc-msm8996.c
drivers/clk/qcom/mmcc-msm8996.c
drivers/clk/sunxi-ng/Makefile
drivers/clocksource/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/mtk_timer.c [deleted file]
drivers/clocksource/tegra20_timer.c
drivers/clocksource/timer-atcpit100.c
drivers/clocksource/timer-keystone.c
drivers/clocksource/timer-mediatek.c [new file with mode: 0644]
drivers/clocksource/timer-sprd.c
drivers/clocksource/timer-stm32.c
drivers/clocksource/timer-ti-32k.c
drivers/clocksource/zevio-timer.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/crypto/padlock-aes.c
drivers/dax/device.c
drivers/dax/super.c
drivers/dma/k3dma.c
drivers/dma/pl330.c
drivers/dma/ti/omap-dma.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/tpm.c
drivers/firmware/efi/runtime-wrappers.c
drivers/fpga/altera-cvp.c
drivers/gpio/gpio-uniphier.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_hw.h
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_property.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/regs-gsc.h
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/job.c
drivers/gpu/ipu-v3/ipu-csi.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-steam.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/adt7475.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/emc1403.c
drivers/hwmon/iio_hwmon.c
drivers/hwmon/k10temp.c
drivers/hwmon/mlxreg-fan.c [new file with mode: 0644]
drivers/hwmon/nct6775.c
drivers/hwmon/nct7904.c
drivers/hwmon/npcm750-pwm-fan.c [new file with mode: 0644]
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/max34440.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/busses/i2c-xlp9xx.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/i2c/i2c-mux.c
drivers/iio/accel/mma8452.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/tsl2772.c
drivers/iio/pressure/bmp280-core.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/uc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/input/input-mt.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/goldfish_events.c
drivers/input/keyboard/hilkbd.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/sc27xx-vibra.c [new file with mode: 0644]
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_2d_sensor.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_bus.h
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/rmi4/rmi_f03.c
drivers/input/rmi4/rmi_f11.c
drivers/input/rmi4/rmi_f12.c
drivers/input/rmi4/rmi_f30.c
drivers/input/rmi4/rmi_f34.c
drivers/input/rmi4/rmi_f54.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/silead.c
drivers/iommu/Kconfig
drivers/iommu/intel-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-ingenic.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/irqchip/irq-stm32-exti.c
drivers/isdn/mISDN/socket.c
drivers/lightnvm/Kconfig
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/platform/vsp1/vsp1_drm.c
drivers/media/rc/bpf-lirc.c
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-main.c
drivers/misc/cxl/api.c
drivers/misc/ibmasm/ibmasmfs.c
drivers/misc/mei/interrupt.c
drivers/misc/vmw_balloon.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/denali_dt.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/can/m_can/m_can.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/apm/xgene-v2/Kconfig
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/geneve.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakelb.c
drivers/net/ieee802154/mcr20a.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/net_failover.c
drivers/net/netdevsim/devlink.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-mux-bcm-iproc.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/ppp/pppoe.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/wcn36xx/testmode.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/mediatek/mt7601u/phy.c
drivers/net/wireless/quantenna/qtnfmac/Kconfig
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/base.h
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nubus/bus.c
drivers/nvdimm/claim.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/opp/core.c
drivers/pci/Makefile
drivers/pci/bus.c
drivers/pci/controller/Kconfig
drivers/pci/controller/dwc/Kconfig
drivers/pci/controller/dwc/pcie-designware-host.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pci-ftpci100.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-v3-semi.c
drivers/pci/controller/pci-versatile.c
drivers/pci/controller/pci-xgene.c
drivers/pci/controller/pcie-mediatek.c
drivers/pci/controller/pcie-mobiveil.c
drivers/pci/controller/pcie-rcar.c
drivers/pci/controller/pcie-xilinx-nwl.c
drivers/pci/controller/pcie-xilinx.c
drivers/pci/endpoint/pci-epf-core.c
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/iov.c
drivers/pci/of.c
drivers/pci/pci-acpi.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/err.c
drivers/pci/probe.c
drivers/pci/remove.c
drivers/perf/xgene_pmu.c
drivers/phy/broadcom/phy-brcm-usb-init.c
drivers/phy/motorola/phy-mapphone-mdm6600.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/mediatek/pinctrl-mt7622.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/sh-pfc/pfc-r8a77970.c
drivers/platform/mips/cpu_hwmon.c
drivers/platform/x86/dell-laptop.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_qoriq.c
drivers/rtc/interface.c
drivers/rtc/rtc-mrst.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eer.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/block/scm_blk.c
drivers/s390/char/Makefile
drivers/s390/char/keyboard.c
drivers/s390/char/monwriter.c
drivers/s390/char/sclp_async.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_class.c
drivers/s390/cio/Makefile
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc.h
drivers/s390/cio/cio.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/cio/qdio_main.c
drivers/s390/cio/trace.h
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_trace.h [new file with mode: 0644]
drivers/s390/crypto/ap_asm.h [deleted file]
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_card.c
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_aux.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/cxlflash/main.h
drivers/scsi/cxlflash/ocxl_hw.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd_zbc.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/vmw_pvscsi.c
drivers/scsi/xen-scsifront.c
drivers/soc/imx/gpc.c
drivers/soc/imx/gpcv2.c
drivers/soc/qcom/Kconfig
drivers/soc/renesas/rcar-sysc.c
drivers/staging/android/ashmem.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/comedi/drivers/quatech_daqp_cs.c
drivers/staging/ks7010/ks_hostif.c
drivers/staging/media/omap4iss/iss_video.c
drivers/staging/rtl8188eu/Kconfig
drivers/staging/rtl8188eu/core/rtw_recv.c
drivers/staging/rtl8188eu/core/rtw_security.c
drivers/staging/rtl8723bs/core/rtw_ap.c
drivers/staging/rtlwifi/rtl8822be/hw.c
drivers/staging/rtlwifi/wifi.h
drivers/staging/speakup/speakup_soft.c
drivers/staging/typec/Kconfig
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/target_core_pr.c
drivers/target/target_core_user.c
drivers/thunderbolt/domain.c
drivers/tty/n_tty.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/Makefile
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/host.c
drivers/usb/chipidea/ulpi.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/aspeed-vhub/Kconfig
drivers/usb/gadget/udc/aspeed-vhub/ep0.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/aspeed-vhub/vhub.h
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci-trace.h
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/yurex.c
drivers/usb/phy/phy-fsl-usb.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/mos7840.c
drivers/usb/typec/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/fbdev/efifb.c
drivers/virtio/virtio_balloon.c
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/privcmd-buf.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/privcmd.h
drivers/xen/xen-scsiback.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/adfs/inode.c
fs/adfs/super.c
fs/afs/rxrpc.c
fs/aio.c
fs/anon_inodes.c
fs/autofs/Makefile
fs/autofs/dev-ioctl.c
fs/autofs/init.c
fs/bad_inode.c
fs/binfmt_elf.c
fs/binfmt_misc.c
fs/block_dev.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/cachefiles/bind.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.h
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbdirect.c
fs/cifs/smbdirect.h
fs/cifs/trace.h
fs/cifs/transport.c
fs/dcache.c
fs/efivarfs/inode.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/ext2/ext2.h
fs/ext2/ialloc.c
fs/ext2/namei.c
fs/ext2/super.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/mmp.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fat/inode.c
fs/file_table.c
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/object.c
fs/fscache/operation.c
fs/fuse/dir.c
fs/gfs2/inode.c
fs/hfs/inode.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/iomap.c
fs/jbd2/transaction.c
fs/jfs/jfs_dinode.h
fs/jfs/jfs_imap.c
fs/jfs/jfs_incore.h
fs/jfs/jfs_inode.c
fs/jfs/namei.c
fs/jfs/super.c
fs/jfs/xattr.c
fs/namei.c
fs/namespace.c
fs/nfs/delegation.c
fs/nfs/dir.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/pnfs.h
fs/nfsd/vfs.c
fs/open.c
fs/pipe.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/task_mmu.c
fs/quota/dquot.c
fs/reiserfs/prints.c
fs/select.c
fs/squashfs/block.c
fs/squashfs/cache.c
fs/squashfs/file.c
fs/squashfs/file_cache.c
fs/squashfs/file_direct.c
fs/squashfs/fragment.c
fs/squashfs/squashfs.h
fs/squashfs/squashfs_fs.h
fs/squashfs/squashfs_fs_sb.h
fs/squashfs/super.c
fs/timerfd.c
fs/udf/balloc.c
fs/udf/directory.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/udfdecl.h
fs/ufs/ialloc.c
fs/ufs/namei.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_trans.c
include/acpi/processor.h
include/asm-generic/atomic-instrumented.h
include/asm-generic/atomic.h
include/asm-generic/atomic64.h
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/lock.h
include/asm-generic/pgtable.h
include/asm-generic/qspinlock_types.h
include/asm-generic/tlb.h
include/crypto/if_alg.h
include/dt-bindings/clock/imx6ul-clock.h
include/linux/acpi.h
include/linux/atmdev.h
include/linux/atomic.h
include/linux/backing-dev-defs.h
include/linux/bitops.h
include/linux/bits.h [new file with mode: 0644]
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bpf_lirc.h
include/linux/bpfilter.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/compat_time.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/cpu.h
include/linux/cpuhotplug.h
include/linux/dax.h
include/linux/dcache.h
include/linux/delayacct.h
include/linux/dma-contiguous.h
include/linux/dma-noncoherent.h
include/linux/efi.h
include/linux/eventfd.h
include/linux/file.h
include/linux/filter.h
include/linux/fs.h
include/linux/fsl/guts.h
include/linux/ftrace.h
include/linux/hid.h
include/linux/if_bridge.h
include/linux/igmp.h
include/linux/iio/buffer-dma.h
include/linux/ima.h
include/linux/input/mt.h
include/linux/intel-iommu.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqdesc.h
include/linux/kernel.h
include/linux/kprobes.h
include/linux/kthread.h
include/linux/ktime.h
include/linux/libata.h
include/linux/lsm_hooks.h
include/linux/marvell_phy.h
include/linux/memory.h
include/linux/mlx5/driver.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mod_devicetable.h
include/linux/net.h
include/linux/netdevice.h
include/linux/nfs_xdr.h
include/linux/nmi.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pm_domain.h
include/linux/poll.h
include/linux/posix-timers.h
include/linux/pti.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/refcount.h
include/linux/ring_buffer.h
include/linux/rmi.h
include/linux/rtmutex.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/sched/task.h
include/linux/sched_clock.h
include/linux/security.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/smpboot.h
include/linux/spinlock.h
include/linux/srcu.h
include/linux/swait.h
include/linux/syscalls.h
include/linux/time.h
include/linux/time64.h
include/linux/timekeeping.h
include/linux/torture.h
include/linux/uio_driver.h
include/net/af_vsock.h
include/net/bluetooth/bluetooth.h
include/net/cfg80211.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/iucv/af_iucv.h
include/net/llc.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_core.h
include/net/netfilter/nf_tproxy.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/net/sctp/sctp.h
include/net/tc_act/tc_csum.h
include/net/tc_act/tc_tunnel_key.h
include/net/tcp.h
include/net/tls.h
include/net/udp.h
include/net/xdp_sock.h
include/rdma/ib_verbs.h
include/trace/events/rcu.h
include/uapi/linux/aio_abi.h
include/uapi/linux/bpf.h
include/uapi/linux/btf.h
include/uapi/linux/elf.h
include/uapi/linux/ethtool.h
include/uapi/linux/kvm.h
include/uapi/linux/nbd.h
include/uapi/linux/perf_event.h
include/uapi/linux/rseq.h
include/uapi/linux/target_core_user.h
include/uapi/linux/tcp.h
include/uapi/linux/time.h
include/uapi/linux/types_32_64.h [deleted file]
include/xen/xen.h
init/Kconfig
init/main.c
ipc/sem.c
ipc/shm.c
kernel/Makefile
kernel/auditsc.c
kernel/bpf/arraymap.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/compat.c
kernel/cpu.c
kernel/dma/Kconfig [new file with mode: 0644]
kernel/dma/Makefile [new file with mode: 0644]
kernel/dma/coherent.c [moved from drivers/base/dma-coherent.c with 100% similarity]
kernel/dma/contiguous.c [moved from drivers/base/dma-contiguous.c with 100% similarity]
kernel/dma/debug.c [moved from lib/dma-debug.c with 100% similarity]
kernel/dma/direct.c [moved from lib/dma-direct.c with 100% similarity]
kernel/dma/mapping.c [moved from drivers/base/dma-mapping.c with 99% similarity]
kernel/dma/noncoherent.c [moved from lib/dma-noncoherent.c with 92% similarity]
kernel/dma/swiotlb.c [moved from lib/swiotlb.c with 99% similarity]
kernel/dma/virt.c [moved from lib/dma-virt.c with 98% similarity]
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/ring_buffer.c
kernel/events/uprobes.c
kernel/fail_function.c
kernel/fork.c
kernel/irq/Kconfig
kernel/irq/debugfs.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/proc.c
kernel/kprobes.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/locktorture.c
kernel/locking/rtmutex.c
kernel/locking/rwsem.c
kernel/memremap.c
kernel/power/suspend.c
kernel/rcu/rcu.h
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/srcutiny.c
kernel/rcu/srcutree.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/rseq.c
kernel/sched/Makefile
kernel/sched/clock.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/pelt.c [new file with mode: 0644]
kernel/sched/pelt.h [new file with mode: 0644]
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/swait.c
kernel/sched/topology.c
kernel/sched/wait.c
kernel/smpboot.c
kernel/softirq.c
kernel/stop_machine.c
kernel/sys.c
kernel/sysctl.c
kernel/test_kprobes.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/ntp.c
kernel/time/ntp_internal.h
kernel/time/posix-cpu-timers.c
kernel/time/posix-stubs.c
kernel/time/posix-timers.c
kernel/time/posix-timers.h
kernel/time/sched_clock.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/time/time.c
kernel/time/timekeeping.c
kernel/time/timekeeping_debug.c
kernel/time/timekeeping_internal.h
kernel/time/timer.c
kernel/torture.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/Kconfig
lib/Kconfig.debug
lib/Kconfig.kasan
lib/Kconfig.ubsan
lib/Makefile
lib/atomic64.c
lib/debugobjects.c
lib/dec_and_lock.c
lib/ioremap.c
lib/iov_iter.c
lib/percpu_ida.c
lib/raid6/s390vx.uc
lib/refcount.c
lib/rhashtable.c
lib/scatterlist.c
lib/test_bpf.c
lib/test_printf.c
mm/backing-dev.c
mm/debug.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/init-mm.c
mm/kasan/kasan.c
mm/memblock.c
mm/memcontrol.c
mm/memfd.c
mm/memory.c
mm/mempolicy.c
mm/mmap.c
mm/nommu.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/slub.c
mm/vmstat.c
mm/zswap.c
net/8021q/vlan.c
net/9p/client.c
net/Makefile
net/appletalk/ddp.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/lec.c
net/atm/mpc.c
net/atm/pppoatm.c
net/atm/pvc.c
net/atm/raw.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/hard-interface.c
net/batman-adv/translation-table.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpf/test_run.c
net/bpfilter/.gitignore [new file with mode: 0644]
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S [new file with mode: 0644]
net/caif/caif_dev.c
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_stats.c
net/core/lwt_bpf.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/xdp.c
net/dccp/ccids/ccid2.c
net/dccp/ccids/ccid3.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/dns_resolver/dns_key.c
net/dsa/slave.c
net/ieee802154/6lowpan/core.c
net/ieee802154/socket.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_tproxy_ipv4.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/calipso.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_tproxy_ipv6.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/llc/llc_core.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_set_core.c [new file with mode: 0644]
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_compat.c
net/netfilter/nft_immediate.c
net/netfilter/nft_lookup.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_TPROXY.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp_commands.c
net/nfc/llcp_sock.c
net/nfc/rawsock.c
net/nsh/nsh.c
net/openvswitch/meter.c
net/packet/af_packet.c
net/phonet/socket.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/ib_frmr.c
net/rds/ib_mr.h
net/rds/ib_rdma.c
net/rds/loop.c
net/rds/loop.h
net/rds/rdma.c
net/rds/rds.h
net/rds/send.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/local_object.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/rxkad.c
net/sched/act_csum.c
net/sched/act_ife.c
net/sched/act_tunnel_key.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_blackhole.c
net/sched/sch_fq_codel.c
net/sched/sch_hfsc.c
net/sctp/chunk.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_cdc.c
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_tx.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/xprt.c
net/tipc/discover.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
net/x25/af_x25.c
net/xdp/xsk.c
net/xdp/xsk_queue.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/.gitignore [new file with mode: 0644]
samples/bpf/parse_varlen.c
samples/bpf/test_overhead_user.c
samples/bpf/trace_event_user.c
samples/bpf/xdp2skb_meta.sh
samples/bpf/xdp_fwd_kern.c
samples/bpf/xdp_redirect_cpu_kern.c
samples/bpf/xdp_redirect_cpu_user.c
samples/bpf/xdpsock_user.c
samples/vfio-mdev/mbochs.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.modbuiltin
scripts/Makefile.modinst
scripts/Makefile.modpost
scripts/Makefile.modsign
scripts/Makefile.ubsan
scripts/cc-can-link.sh
scripts/checkpatch.pl
scripts/extract-vmlinux
scripts/gcc-x86_64-has-stack-protector.sh
scripts/kconfig/expr.h
scripts/kconfig/preprocess.c
scripts/kconfig/zconf.y
scripts/tags.sh
security/Kconfig
security/apparmor/lsm.c
security/integrity/ima/ima.h
security/integrity/ima/ima_appraise.c
security/integrity/ima/ima_main.c
security/keys/dh.c
security/security.c
security/selinux/hooks.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
security/tomoyo/tomoyo.c
sound/core/rawmidi.c
sound/core/seq/seq_clientmgr.c
sound/core/timer.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/lx6464es/lx6464es.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/unistd.h [new file with mode: 0644]
tools/arch/parisc/include/uapi/asm/errno.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/unistd.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/mcsafe_test.h [new file with mode: 0644]
tools/arch/x86/lib/memcpy_64.S
tools/bpf/bpftool/common.c
tools/bpf/bpftool/map.c
tools/bpf/bpftool/perf.c
tools/bpf/bpftool/prog.c
tools/build/Build.include
tools/build/Makefile
tools/include/uapi/asm-generic/unistd.h [new file with mode: 0644]
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/btf.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/in.h [new file with mode: 0644]
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/memory-model/Documentation/explanation.txt
tools/memory-model/Documentation/recipes.txt
tools/memory-model/README
tools/memory-model/linux-kernel.bell
tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus [moved from tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus with 95% similarity]
tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus [moved from tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus with 95% similarity]
tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus [moved from tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus with 91% similarity]
tools/memory-model/litmus-tests/R+fencembonceonces.litmus [moved from tools/memory-model/litmus-tests/R+mbonceonces.litmus with 95% similarity]
tools/memory-model/litmus-tests/README
tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus [moved from tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus with 90% similarity]
tools/memory-model/litmus-tests/SB+fencembonceonces.litmus [moved from tools/memory-model/litmus-tests/SB+mbonceonces.litmus with 95% similarity]
tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus [new file with mode: 0644]
tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus [moved from tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus with 93% similarity]
tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus [moved from tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus with 94% similarity]
tools/memory-model/scripts/checkalllitmus.sh [changed mode: 0644->0755]
tools/memory-model/scripts/checklitmus.sh [changed mode: 0644->0755]
tools/objtool/arch/x86/include/asm/orc_types.h
tools/objtool/check.c
tools/objtool/check.h
tools/objtool/elf.c
tools/objtool/orc_dump.c
tools/objtool/orc_gen.c
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl [new file with mode: 0755]
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/arch/s390/util/kvm-stat.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/perf_regs.c
tools/perf/arch/x86/util/pmu.c
tools/perf/arch/x86/util/tsc.c
tools/perf/bench/Build
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memcpy-x86-64-lib.c [new file with mode: 0644]
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-diff.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/include/bpf/bpf.h
tools/perf/jvmti/jvmti_agent.c
tools/perf/perf.h
tools/perf/pmu-events/Build
tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
tools/perf/pmu-events/arch/s390/cf_z10/basic.json
tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
tools/perf/pmu-events/arch/s390/cf_z10/extended.json
tools/perf/pmu-events/arch/s390/cf_z13/basic.json
tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
tools/perf/pmu-events/arch/s390/cf_z13/extended.json
tools/perf/pmu-events/arch/s390/cf_z13/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z14/basic.json
tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
tools/perf/pmu-events/arch/s390/cf_z14/extended.json
tools/perf/pmu-events/arch/s390/cf_z14/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z196/basic.json
tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
tools/perf/pmu-events/arch/s390/cf_z196/extended.json
tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/jevents.c
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
tools/perf/scripts/python/sched-migration.py
tools/perf/tests/builtin-test.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/topology.c
tools/perf/trace/beauty/Build
tools/perf/trace/beauty/beauty.h
tools/perf/trace/beauty/drm_ioctl.sh
tools/perf/trace/beauty/kcmp_type.sh
tools/perf/trace/beauty/kvm_ioctl.sh
tools/perf/trace/beauty/madvise_behavior.sh
tools/perf/trace/beauty/perf_ioctl.sh
tools/perf/trace/beauty/pkey_alloc_access_rights.sh
tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
tools/perf/trace/beauty/socket.c [new file with mode: 0644]
tools/perf/trace/beauty/socket_ipproto.sh [new file with mode: 0755]
tools/perf/trace/beauty/vhost_virtio_ioctl.sh
tools/perf/ui/gtk/hists.c
tools/perf/ui/stdio/hist.c
tools/perf/util/bpf-loader.c
tools/perf/util/c++/clang.cpp
tools/perf/util/comm.c
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
tools/perf/util/cs-etm.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/llvm-utils.c
tools/perf/util/machine.c
tools/perf/util/metricgroup.c
tools/perf/util/metricgroup.h
tools/perf/util/namespaces.h
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/sort.h
tools/perf/util/stat-shadow.c
tools/perf/util/syscalltbl.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_btf_haskv.c
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lirc_mode2.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/test_tunnel.sh
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh [changed mode: 0644->0755]
tools/testing/selftests/net/tcp_mmap.c
tools/testing/selftests/net/udpgso_bench.sh
tools/testing/selftests/pstore/pstore_post_reboot_tests
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/kvm-build.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot [deleted file]
tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq-arm.h
tools/testing/selftests/rseq/rseq-mips.h [new file with mode: 0644]
tools/testing/selftests/rseq/rseq-s390.h [new file with mode: 0644]
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/run_param_test.sh [changed mode: 0644->0755]
tools/testing/selftests/sparc64/Makefile
tools/testing/selftests/sparc64/drivers/Makefile
tools/testing/selftests/static_keys/test_static_keys.sh
tools/testing/selftests/sync/config [new file with mode: 0644]
tools/testing/selftests/sysctl/sysctl.sh
tools/testing/selftests/timers/raw_skew.c
tools/testing/selftests/user/test_user_copy.sh
tools/testing/selftests/vm/compaction_test.c
tools/testing/selftests/vm/mlock2-tests.c
tools/testing/selftests/vm/run_vmtests
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/x86/sigreturn.c
tools/testing/selftests/zram/zram.sh
tools/testing/selftests/zram/zram_lib.sh
tools/usb/ffs-test.c
tools/virtio/asm/barrier.h
tools/virtio/linux/kernel.h
tools/virtio/linux/scatterlist.h
virt/kvm/Kconfig
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c
virt/kvm/arm/psci.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/async_pf.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index 6c06e10bd04bd107380e391cee17c5f50a366633..f5120a00f5116bc72fda540b427d4d774069a13b 100644 (file)
@@ -380,31 +380,26 @@ and therefore need no protection.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
 </pre>
 
 <p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
 <tt>-&gt;lock</tt> field.
 
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
 in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
 as well.
 The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
 fashion.
 The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
 (down the tree from the root to the leaves) to <tt>rcu_data</tt>.
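
The low-order state bits are easiest to see in code. The following is a
minimal sketch modeled on the helpers in kernel/rcu/rcu.h (rcu_seq_ctr(),
rcu_seq_state(), rcu_seq_start(), and rcu_seq_end()); it keeps the kernel's
names but omits the memory barriers and sanity checks the real helpers
carry, so treat it as an illustration rather than the implementation:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Grace-period count: ->gp_seq with the two state bits shifted away. */
static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/* Bottom two bits: zero means idle, one means a grace period is running. */
static int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/* Start a grace period: idle (...00) becomes in progress (...01). */
static void rcu_seq_start(unsigned long *sp)
{
	*sp += 1;
}

/* End a grace period: round up to the next idle (...00) value. */
static void rcu_seq_end(unsigned long *sp)
{
	*sp = (*sp | RCU_SEQ_STATE_MASK) + 1;
}

int main(void)
{
	unsigned long gp_seq = 0;

	rcu_seq_start(&gp_seq);	/* count 0, state 1: GP in progress */
	rcu_seq_end(&gp_seq);	/* count 1, state 0: idle again */
	printf("count=%lu state=%d\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	return 0;
}
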
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
 </pre>
 
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field are zero, then this <tt>rcu_node</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure.  The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
 
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Suppose that this <tt>rcu_node</tt> structure doesn't see
+       a request for a very long time.
+       Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+       problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+       <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+       will be updated at the end of the grace period.
+       Modulo-arithmetic comparisons therefore will always get the
+       correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
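
The modulo-arithmetic comparison mentioned in this answer can be sketched
as follows. ULONG_CMP_GE() mirrors the macro of the same name in
include/linux/rcupdate.h; gp_seq_done() is a hypothetical stand-in for the
kernel's rcu_seq_done() helper:

#include <limits.h>
#include <stdio.h>

/* True if a is at or after b, provided they differ by less than ULONG_MAX/2. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

/* Has the grace period requested as "s" completed, given counter *sp? */
static int gp_seq_done(const unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(*sp, s);
}

int main(void)
{
	unsigned long gp_seq = ULONG_MAX - 3;	/* about to wrap */
	unsigned long s = gp_seq + 8;		/* request wraps past ULONG_MAX */

	printf("%d\n", gp_seq_done(&gp_seq, s));	/* 0: not yet fulfilled */
	gp_seq += 12;					/* counter also wraps */
	printf("%d\n", gp_seq_done(&gp_seq, s));	/* 1: fulfilled */
	return 0;
}

Because unsigned subtraction is itself modulo arithmetic in C, the
comparison gives the right answer on both sides of the wrap.
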
 
 <h5>Quiescent-State Tracking</h5>
 
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
        </ol>
 
        <p><font color="ffffff">So the locking is absolutely required in
-       order to coordinate
-       clearing of the bits with the grace-period numbers in
-       <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+       order to coordinate clearing of the bits with updating of the
+       grace-period sequence number in <tt>-&gt;gp_seq</tt>.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
 as follows:
 
 <pre>
-  1   unsigned long completed;
-  2   unsigned long gpnum;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
   3   bool cpu_no_qs;
   4   bool core_needs_qs;
   5   bool gpwrap;
   6   unsigned long rcu_qs_ctr_snap;
 </pre>
 
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
 fields are the counterparts of the fields of the same name
 in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
 They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
 <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
 arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
 will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
 
 <table>
 <tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
 <tr><td>
        All this replication of the grace period numbers can only cause
        massive confusion.
-       Why not just keep a global pair of counters and be done with it???
+       Why not just keep a global sequence number and be done with it???
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
-	Because if there was only a single global pair of grace-period
-	numbers, there would need to be a single global lock to allow
-	safely accessing and updating them.
+	Because if there were only a single global sequence
+	number, there would need to be a single global lock to allow
+	safely accessing and updating it.
        And if we are not going to have a single global lock, we need
        to carefully manage the numbers on a per-node basis.
        Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
 while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
 RCU core needs a quiescent state from the corresponding CPU.
 The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
 will cause the CPU to disregard the values of its counters on
 its next exit from idle.
 Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
 whenever it notices that another RCU grace period has completed.
 The CPU detects the completion of an RCU grace period by noticing
 that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
 <tt>rcu_node</tt> structure.
 Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginning and end of each
 grace period.
 
 <p>
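
The detection step described above can be illustrated with a hypothetical,
pared-down fragment; the structure and field names echo the kernel's, but
the real logic lives in note_gp_changes() and rcu_advance_cbs() in
kernel/rcu/tree.c and does considerably more:

/* Pared-down stand-ins for the kernel structures. */
struct rcu_node {
	unsigned long gp_seq;		/* updated at GP start and end */
};

struct rcu_data {
	unsigned long gp_seq;		/* this CPU's last-seen value */
	struct rcu_node *mynode;	/* this CPU's leaf rcu_node */
};

/*
 * Returns nonzero if a grace period has started and/or ended since
 * this CPU last looked; the kernel reacts by advancing callbacks.
 */
static int cpu_notices_gp_change(struct rcu_data *rdp)
{
	if (rdp->gp_seq == rdp->mynode->gp_seq)
		return 0;			/* nothing new */
	rdp->gp_seq = rdp->mynode->gp_seq;	/* catch up */
	return 1;
}
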
index 8651b0b4fd79f37caef5572037656d1eb2a43cda..a346ce0116eb5ebc04733e85f074cfd00240c886 100644 (file)
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
 grace-period initialization.
 
 <p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
 grace-period-number counter, as shown below:
 
 </p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
 
 <p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
 tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
 <tt>rcu_state</tt> structure, as shown in the following diagram.
 
 </p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
 to notice that a new grace period has started, as described in the next
 section.
 But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
 the start of the grace period will happen after the actual start
 of the grace period.
 
@@ -466,7 +466,7 @@ section that the grace period must wait on.
 <tr><td>
        But a RCU read-side critical section might have started
        after the beginning of the grace period
-       (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+       (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
        the grace period wait on such a critical section?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
 <h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
 
 <p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
 The ordering effects are shown below:
 
 </p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
        CPU has reported its quiescent state, but it may be some
        milliseconds before RCU becomes aware of this.
        The latest reasonable candidate is once the <tt>rcu_state</tt>
-       structure's <tt>-&gt;completed</tt> field has been updated,
+       structure's <tt>-&gt;gp_seq</tt> field has been updated,
        but it is quite possible that some CPUs have already completed
        phase two of their updates by that time.
        In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
 <h4><a name="Callback Invocation">Callback Invocation</a></h4>
 
 <p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
 invoking its RCU callbacks that were waiting for this grace period
 to end.
 These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
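
For orientation, the order in which the ->gp_seq fields advance during
initialization and cleanup can be caricatured as below. This is only a
sketch: the flat array stands in for the breadth-first walk of the
rcu_node tree, and the real loops, with their locking and barriers, are
rcu_gp_init() and rcu_gp_cleanup() in kernel/rcu/tree.c:

struct rcu_node { unsigned long gp_seq; };
struct rcu_state {
	unsigned long gp_seq;
	struct rcu_node *node;	/* nodes in breadth-first order */
	int nnodes;
};

/* Simplified; see the rcu_seq_start()/rcu_seq_end() sketch earlier. */
static void rcu_seq_start(unsigned long *sp) { *sp += 1; }
static void rcu_seq_end(unsigned long *sp) { *sp = (*sp | 0x3) + 1; }

static void gp_init(struct rcu_state *rsp)
{
	int i;

	rcu_seq_start(&rsp->gp_seq);		/* rcu_state first... */
	for (i = 0; i < rsp->nnodes; i++)	/* ...then root to leaves */
		rsp->node[i].gp_seq = rsp->gp_seq;
}

static void gp_cleanup(struct rcu_state *rsp)
{
	int i;

	for (i = 0; i < rsp->nnodes; i++)	/* root to leaves first... */
		rcu_seq_end(&rsp->node[i].gp_seq);
	rcu_seq_end(&rsp->gp_seq);		/* ...rcu_state last */
}

This ordering is why a CPU's observation of a grace period's start happens
after the actual start, and why its callbacks become invocable once its
leaf rcu_node structure's ->gp_seq field has been updated.
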
index 754f426b297ab8a4611641d1c326871ce80899d9..bf84fbab27eece0856b030d34f421d608d460bce 100644 (file)
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89017"
-     inkscape:cy="542.52419"
-     inkscape:window-x="86"
-     inkscape:window-y="28"
+     inkscape:zoom="0.78716603"
+     inkscape:cx="513.06403"
+     inkscape:cy="623.1214"
+     inkscape:window-x="102"
+     inkscape:window-y="38"
      inkscape:window-maximized="0"
      inkscape:current-layer="g3188-3"
      fit-margin-top="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3199.1516"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5324.5371"
-       y="15414.598"
+       x="5264.4731"
+       y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-753"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-7"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="7479.5796"
-       y="17699.943"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="cc"
        inkscape:connector-curvature="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(-737.93887,7732.6672)"
        id="g3188-3">
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13175.802"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13390.038"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
       <flowRoot
          xml:space="preserve"
          id="flowRoot3356"
              height="63.63961"
              x="332.34018"
              y="681.87292" /></flowRegion><flowPara
-           id="flowPara3362" /></flowRoot>    </g>
+           id="flowPara3362" /></flowRoot>      <text
+         xml:space="preserve"
+         x="3156.6121"
+         y="13317.754"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-6"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    </g>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(-858.40227,7769.0342)"
        id="path3414-8-3-6-6"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       x="7418.769"
+       y="17646.104"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-70"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-1642.5377,-11611.245)"
     </g>
     <text
        xml:space="preserve"
-       x="5327.3057"
+       x="5274.1133"
        y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-36"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-151.71746,-11647.612)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7486.4907"
-       y="17670.119"
+       x="7408.5918"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-6817.1997,-11647.612)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7474.1382"
-       y="17688.926"
+       x="7416.8003"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-5"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      id="path3414-8-3-6"
      inkscape:connector-curvature="0"
      sodipodi:nodetypes="cc" />
-  <text
-     xml:space="preserve"
-     x="7318.9653"
-     y="6031.6353"
-     font-style="normal"
-     font-weight="bold"
-     font-size="192"
-     id="text202-2"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
   <g
      style="fill:none;stroke-width:0.025in"
      id="g4504-3-9"
      id="path3134-9-0-3-5"
      d="m 6875.6003,15833.906 1595.7755,0"
      style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+  <text
+     xml:space="preserve"
+     x="7275.2612"
+     y="5971.8916"
+     font-style="normal"
+     font-weight="bold"
+     font-size="192"
+     id="text202-36-1"
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+       style="font-size:172.87567139px"
+       id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
 </svg>
index 0161262904ece0892946636996e7f17ed0ad1cdb..8c207550818f3f544a4eb2292ffd7a2353946189 100644 (file)
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89019"
-     inkscape:cy="636.57143"
-     inkscape:window-x="697"
+     inkscape:zoom="2.6330492"
+     inkscape:cx="524.82797"
+     inkscape:cy="519.31194"
+     inkscape:window-x="79"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3188"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3119.363"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
index de6ecc51b00e0e7073273ce7e2d6a9ef755ab91d..d24d7d555dbce63f0534ec07b7870ea94545f4e1 100644 (file)
@@ -19,7 +19,7 @@
    id="svg2"
    version="1.1"
    inkscape:version="0.48.4 r9939"
-   sodipodi:docname="TreeRCU-gp-init-2.svg">
+   sodipodi:docname="TreeRCU-gp-init-3.svg">
   <metadata
      id="metadata212">
     <rdf:RDF>
      inkscape:window-width="1087"
      inkscape:window-height="1144"
      id="namedview208"
-     showgrid="false"
-     inkscape:zoom="0.70710678"
+     showgrid="true"
+     inkscape:zoom="0.68224756"
      inkscape:cx="617.89019"
      inkscape:cy="625.84293"
-     inkscape:window-x="697"
+     inkscape:window-x="54"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3153"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
-     fit-margin-bottom="5" />
+     fit-margin-bottom="5">
+    <inkscape:grid
+       type="xygrid"
+       id="grid3090" />
+  </sodipodi:namedview>
   <path
      sodipodi:nodetypes="cccccccccccccccccccccccc"
      inkscape:connector-curvature="0"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5392.3345"
-       y="15407.104"
+       x="5253.6904"
+       y="15407.032"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7536.4883"
-       y="17640.934"
+       x="7415.4365"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-1642.5375,-11610.962)"
     </g>
     <text
        xml:space="preserve"
-       x="5378.4146"
-       y="15436.927"
+       x="5258.0688"
+       y="15412.313"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-151.71726,-11647.329)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7520.1294"
-       y="17673.639"
+       x="7405.2607"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-35"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-6817.1998,-11647.329)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7521.4663"
-       y="17666.062"
+       x="7413.4688"
+       y="17670.566"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-75"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      sodipodi:nodetypes="cc" />
   <text
      xml:space="preserve"
-     x="7370.856"
-     y="5997.5972"
+     x="7271.9297"
+     y="6023.2412"
      font-style="normal"
      font-weight="bold"
      font-size="192"
      id="text202-62"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
 </svg>
index b13b7b01bb3ab1e7818dcc644a8dfb5b89e11068..acd73c7ad0f4c168bff494fbdf44a03e98a44f8c 100644 (file)
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.6004608"
-     inkscape:cx="826.65969"
-     inkscape:cy="483.3047"
-     inkscape:window-x="66"
-     inkscape:window-y="28"
+     inkscape:zoom="0.81932583"
+     inkscape:cx="840.45848"
+     inkscape:cy="5052.4242"
+     inkscape:window-x="787"
+     inkscape:window-y="24"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g4"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1749.0282,658.72243)"
        id="g3188">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3137.9988"
+         y="13271.316"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-626"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1739.0986,17188.625)"
        id="g3188-6">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
       <g
          id="g3107-5"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-1">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3147.9268"
+         y="13240.524"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-1"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
       </g>
       <text
          xml:space="preserve"
-         x="5392.3345"
-         y="15407.104"
+         x="5263.1094"
+         y="15411.646"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-7"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-92"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
            id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7536.4883"
-         y="17640.934"
+         x="7417.4053"
+         y="17655.502"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-9"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-759"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-2353.8462,17224.992)"
       </g>
       <text
          xml:space="preserve"
-         x="5378.4146"
-         y="15436.927"
+         x="5246.1548"
+         y="15411.648"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-87"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-863.02613,17188.625)"
            id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7520.1294"
-         y="17673.639"
+         x="7433.8257"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-35"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-2"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-7528.5085,17188.625)"
            id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7521.4663"
-         y="17666.062"
+         x="7415.4404"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-75-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-0"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-4"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6659.5469"
-       y="34833.551"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-62"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
          font-weight="bold"
          font-size="192"
          id="text202-6-6-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1874.038,53203.538)"
        id="g3188-7">
-      <text
-         xml:space="preserve"
-         x="3199.1516"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-82"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
       <g
          id="g3107-53"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-19">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3175.896"
+         y="13240.11"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-3"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
       </g>
       <text
          xml:space="preserve"
-         x="5324.5371"
-         y="15414.598"
+         x="5264.4829"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-753"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-7"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="10084.225"
-       y="70903.312"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9-0"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
        id="path3134-9-0-3-9"
        d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
        style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74111.672"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g3107-62-6"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-7-7">Root</tspan></text>
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74325.906"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(1746.2528,60972.572)"
       </g>
       <text
          xml:space="preserve"
-         x="5327.3057"
-         y="15428.84"
+         x="5274.1216"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
          id="text202-36"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-728.08545,53203.538)"
            id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7486.4907"
-         y="17670.119"
+         x="7435.1987"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-2"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-9"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-7393.5687,53203.538)"
            id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7474.1382"
-         y="17688.926"
+         x="7416.8125"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-5-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-35"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-67"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6742.6001"
-       y="70882.617"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-2"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g4504-3-9-6"
        font-size="192"
        id="text202-7-9-6-6-7"
        style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+    <text
+       xml:space="preserve"
+       x="6698.9019"
+       y="70885.211"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="10023.457"
+       y="70885.234"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-0"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="5023.3389"
+       y="74209.773"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-36"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="6562.5884"
+       y="34870.727"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
 </svg>
index de3992f4cbe1bfcb3c91667621d435ada38768ac..149bec2a4493d53c69f2e0589cbff4bcc1342486 100644 (file)
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="616.47598"
-     inkscape:cy="595.41964"
-     inkscape:window-x="813"
+     inkscape:zoom="0.96484375"
+     inkscape:cx="507.0191"
+     inkscape:cy="885.62207"
+     inkscape:window-x="47"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="g4405"
+     inkscape:current-layer="g3115"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
          font-weight="bold"
          font-size="192"
          id="text202-6-6"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
index 4259f95c32616eb04f23037475163e4195f2ddf7..f99cf11b314b2c5667a917460d092c4f83be0a3d 100644 (file)
@@ -172,7 +172,7 @@ it will print a message similar to the following:
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
        16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
-       (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+       (detected by 32, t=2603 jiffies, g=7075, q=625)
 
 This message indicates that CPU 32 detected that CPUs 2 and 16 were both
 causing stalls, and that the stall was affecting RCU-sched.  This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
 period.
 
 The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
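
The "g" number is an rcu_seq-style sequence counter: its low-order bits
record the grace-period phase and the remaining upper bits count grace
periods.  A simplified sketch of the helpers that manipulate it, modelled
on kernel/rcu/rcu.h (which remains the authoritative version; the barrier
comments here are illustrative):

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Mark a grace period as started: bump the phase bits. */
	static void rcu_seq_start(unsigned long *sp)
	{
		WRITE_ONCE(*sp, *sp + 1);
		smp_mb(); /* Order grace-period work after the update. */
	}

	/* Mark a grace period as ended: advance to the next idle value. */
	static void rcu_seq_end(unsigned long *sp)
	{
		smp_mb(); /* Order grace-period work before the update. */
		WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
	}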
 
 In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
 for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed:
 
-       kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states.  The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states.  The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of
+the task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
 
 
 Multiple Warnings From One Stall
index 65eb856526b7c308b08088a31957d70baa21b5a7..c2a7facf7ff937a2265ad3103bd267d7aabf0d4b 100644 (file)
@@ -588,6 +588,7 @@ It is extremely simple:
        void synchronize_rcu(void)
        {
                write_lock(&rcu_gp_mutex);
+               smp_mb__after_spinlock();
                write_unlock(&rcu_gp_mutex);
        }
 
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
 
 The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
 and release a global reader-writer lock.  The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it.  This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it.  This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise.  The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+       Documentation/RCU/Design/Requirements/Requirements.html.
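
(For reference, the reader-side primitives of this toy implementation are
simply wrappers around the same reader-writer lock, along the lines of:

	static DEFINE_RWLOCK(rcu_gp_mutex);

	void rcu_read_lock(void)
	{
		read_lock(&rcu_gp_mutex);
	}

	void rcu_read_unlock(void)
	{
		read_unlock(&rcu_gp_mutex);
	}

so synchronize_rcu() cannot return while any reader still holds the lock.)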
 
 It is possible to nest rcu_read_lock(), since reader-writer locks may
 be recursively acquired.  Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
        list_next_rcu
        list_for_each_entry_rcu
        list_for_each_entry_continue_rcu
+       list_for_each_entry_from_rcu
        hlist_first_rcu
        hlist_next_rcu
        hlist_pprev_rcu
        hlist_for_each_entry_rcu
        hlist_for_each_entry_rcu_bh
+       hlist_for_each_entry_from_rcu
        hlist_for_each_entry_continue_rcu
        hlist_for_each_entry_continue_rcu_bh
        hlist_nulls_first_rcu
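
The new _from_ variants resume an RCU-protected traversal from the current
cursor position rather than from the list head.  A minimal usage sketch of
list_for_each_entry_from_rcu() (the struct, list head, and consume() helper
are illustrative, not part of the API):

	struct item {
		struct list_head link;
		int val;
	};

	/* 'pos' must already point at a live entry located while this
	 * RCU read-side critical section was in effect. */
	rcu_read_lock();
	list_for_each_entry_from_rcu(pos, &item_list, link)
		consume(pos->val);
	rcu_read_unlock();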
index efc7aa7a067099f6bacdb860cde13f2d876030a8..5cde1ff32ff3771c971c0ef318dba09c3659f90e 100644 (file)
 
        nosync          [HW,M68K] Disables sync negotiation for all devices.
 
-       notsc           [BUGS=X86-32] Disable Time Stamp Counter
-
        nowatchdog      [KNL] Disable both lockup detectors, i.e.
                        soft-lockup and NMI watchdog (hard-lockup).
 
                        Set time (s) after boot for CPU-hotplug testing.
 
        rcutorture.onoff_interval= [KNL]
-                       Set time (s) between CPU-hotplug operations, or
-                       zero to disable CPU-hotplug testing.
+                       Set time (jiffies) between CPU-hotplug operations,
+                       or zero to disable CPU-hotplug testing.
 
        rcutorture.shuffle_interval= [KNL]
                        Set task-shuffle interval (s).  Shuffling tasks
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+       xhci-hcd.quirks         [USB,KNL]
+                       A hex value specifying a bitmask of supplemental xhci
+                       host controller quirks.  The meaning of each bit can
+                       be found in the header drivers/usb/host/xhci.h.
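
			For example, a boot command line might include
			xhci-hcd.quirks=0x40, where the particular bit
			chosen is purely illustrative; the authoritative
			bit assignments are the XHCI_* quirk definitions
			in drivers/usb/host/xhci.h.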
index ab2fe0eda1d7c317faefab52363ce96755ac64d5..8f1d3de449b53fedcc78d1aee506e6882f2be90c 100644 (file)
@@ -324,8 +324,7 @@ Global Attributes
 
 ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
 control its functionality at the system level.  They are located in the
-``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
-CPUs.
+``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs.
 
 Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
 argument is passed to the kernel in the command line.
@@ -379,6 +378,17 @@ argument is passed to the kernel in the command line.
        but it affects the maximum possible value of per-policy P-state limits
        (see `Interpretation of Policy Attributes`_ below for details).
 
+``hwp_dynamic_boost``
+       This attribute is only present if ``intel_pstate`` works in the
+       `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+       the processor.  If set (equal to 1), it causes the minimum P-state limit
+       to be increased dynamically for a short time whenever a task previously
+       waiting on I/O is selected to run on a given logical CPU (the purpose
+       of this mechanism is to improve performance).
+
+       This setting has no effect on logical CPUs whose minimum P-state limit
+       is directly set to the highest non-turbo P-state or above it.
+
 .. _status_attr:
 
 ``status``
@@ -410,7 +420,7 @@ argument is passed to the kernel in the command line.
        That only is supported in some configurations, though (for example, if
        the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
        the operation mode of the driver cannot be changed), and if it is not
-       supported in the current configuration, writes to this attribute with
+       supported in the current configuration, writes to this attribute will
        fail with an appropriate error.
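
As a usage note for the ``hwp_dynamic_boost`` attribute described above
(assuming the attribute is present on the running system), dynamic boost
can be enabled by writing 1 to it::

	# echo 1 > /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost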
 
 Interpretation of Policy Attributes
index 2e7165f86f553a65eafc16c92431ff0b6e14927e..724583453e1fbe38fe137fd935e7ea3554555836 100644 (file)
@@ -29,7 +29,7 @@ updated by one CPU, local_t is probably more appropriate. Please see
 local_t.
 
 The first operations to implement for atomic_t's are the initializers and
-plain reads. ::
+plain writes. ::
 
        #define ATOMIC_INIT(i)          { (i) }
        #define atomic_set(v, i)        ((v)->counter = (i))
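
A quick illustration of the distinction (a sketch; the variable name is
illustrative)::

	static atomic_t refcnt = ATOMIC_INIT(1);	/* initializer */

	atomic_set(&refcnt, 0);				/* plain write */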
index 8e44aea366c262068900cddaabd240d8615ac552..76fe2d0f5e7d7db307bfa4ead890ead2d8840bdd 100644 (file)
@@ -284,7 +284,7 @@ Resources Management
 MTRR Handling
 -------------
 
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
    :export:
 
 Security Framework
index 4424fa2c67d79ebbdfc3aea44d0bd93fa01407c3..01532b3008ae56bb9db1dc1db7d9b9709db5965e 100644 (file)
@@ -15,6 +15,8 @@ Constructor parameters:
    size)
 5. the number of optional parameters (the parameters with an argument
    count as two)
+       start_sector n          (default: 0)
+               offset from the start of cache device in 512-byte sectors
        high_watermark n        (default: 50)
                start writeback when the number of used blocks reach this
                watermark
index bdadc3da9556d47e52372f0a68846779dccc1d95..6970f30a3770f8027a2509aab25f4fe75785667e 100644 (file)
@@ -66,7 +66,7 @@ Required root node properties:
        - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
                                    Octa board.
        - "insignal,origen"       - for Exynos4210-based Insignal Origen board.
-       - "insignal,origen4412    - for Exynos4412-based Insignal Origen board.
+       - "insignal,origen4412"   - for Exynos4412-based Insignal Origen board.
 
 
 Optional nodes:
index 6fddb4f4f71a45f0fc001b7f904a89f6e50948b3..3055d5c2c04e0ab796215803196c7590a69e02be 100644 (file)
@@ -36,7 +36,7 @@ Optional nodes:
 
  - port/ports: to describe a connection to an external encoder. The
    binding follows Documentation/devicetree/bindings/graph.txt and
-   suppors a single port with a single endpoint.
+   supports a single port with a single endpoint.
 
  - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
    Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
index 20fc72d9e61e5721e56e0aeb0479682f921fd154..45a61b46228712592029e75fe117262ba47d9112 100644 (file)
@@ -1,7 +1,7 @@
 Nintendo Wii (Hollywood) GPIO controller
 
 Required properties:
-- compatible: "nintendo,hollywood-gpio
+- compatible: "nintendo,hollywood-gpio"
 - reg: Physical base address and length of the controller's registers.
 - gpio-controller: Marks the device node as a GPIO controller.
 - #gpio-cells: Should be <2>. The first cell is the pin number and the
diff --git a/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
new file mode 100644 (file)
index 0000000..28f43e9
--- /dev/null
@@ -0,0 +1,84 @@
+Nuvoton NPCM7xx PWM and Fan Tacho controller device
+
+The Nuvoton BMC NPCM7XX supports 8 Pulse-width modulation (PWM)
+controller outputs and 16 Fan tachometer controller inputs.
+
+Required properties for pwm-fan node
+- #address-cells : should be 1.
+- #size-cells  : should be 0.
+- compatible   : "nuvoton,npcm750-pwm-fan" for Poleg NPCM7XX.
+- reg                  : specifies physical base address and size of the registers.
+- reg-names    : must contain:
+                                       * "pwm" for the PWM registers.
+                                       * "fan" for the Fan registers.
+- clocks               : phandle of reference clocks.
+- clock-names  : must contain
+                                       * "pwm" for PWM controller operating clock.
+                                       * "fan" for Fan controller operating clock.
+- interrupts   : contain the Fan interrupts with flags for falling edge.
+- pinctrl-names        : a pinctrl state named "default" must be defined.
+- pinctrl-0    : phandle referencing pin configuration of the PWM and Fan
+                                       controller ports.
+
+fan subnode format:
+===================
+The fan subnode can contain up to 8 child nodes, each representing a fan.
+Each fan child node must have one PWM channel and at least one Fan tach
+channel.
+
+A PWM channel can be configured with cooling-levels to create a cooling
+device, which can then be bound to a thermal zone for thermal control.
+
+Required properties for each child node:
+- reg : specifies the PWM output channel.
+       An integer value in the range 0 through 7 representing
+       the PWM channel number used.
+
+- fan-tach-ch : specifies the Fan tach input channel.
+               An integer value in the range 0 through 15 representing
+               the fan tach channel number used.
+
+               At least one Fan tach input channel is required.
+
+Optional property for each child node:
+- cooling-levels: PWM duty cycle values in a range from 0 to 255
+                  which correspond to thermal cooling states.
+
+Examples:
+
+pwm_fan:pwm-fan-controller@103000 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       compatible = "nuvoton,npcm750-pwm-fan";
+       reg = <0x103000 0x2000>,
+               <0x180000 0x8000>;
+       reg-names = "pwm", "fan";
+       clocks = <&clk NPCM7XX_CLK_APB3>,
+               <&clk NPCM7XX_CLK_APB4>;
+       clock-names = "pwm","fan";
+       interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+                       <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pwm0_pins &pwm1_pins &pwm2_pins
+                       &fanin0_pins &fanin1_pins &fanin2_pins
+                       &fanin3_pins &fanin4_pins>;
+       fan@0 {
+               reg = <0x00>;
+               fan-tach-ch = /bits/ 8 <0x00 0x01>;
+               cooling-levels = <127 255>;
+       };
+       fan@1 {
+               reg = <0x01>;
+               fan-tach-ch = /bits/ 8 <0x02 0x03>;
+       };
+       fan@2 {
+               reg = <0x02>;
+               fan-tach-ch = /bits/ 8 <0x04>;
+       };
+
+};
diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
new file mode 100644 (file)
index 0000000..f2ec0d4
--- /dev/null
@@ -0,0 +1,23 @@
+Spreadtrum SC27xx PMIC Vibrator
+
+Required properties:
+- compatible: should be "sprd,sc2731-vibrator".
+- reg: address of vibrator control register.
+
+Example :
+
+       sc2731_pmic: pmic@0 {
+               compatible = "sprd,sc2731";
+               reg = <0>;
+               spi-max-frequency = <26000000>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               vibrator@eb4 {
+                       compatible = "sprd,sc2731-vibrator";
+                       reg = <0xeb4>;
+               };
+       };
index 121d9b7c79a24cd05e6452bb8b52a14d3d20f46a..1063c30d53f7d0fd7b642d323d32799ba4fb51fe 100644 (file)
@@ -32,7 +32,7 @@ i2c@00000000 {
                reg = <0x6c>;
                interrupt-parent = <&gpx1>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
-               vdd-supply = <&ldo15_reg>";
+               vdd-supply = <&ldo15_reg>;
                vid-supply = <&ldo18_reg>;
                reset-gpios = <&gpx1 5 0>;
                touchscreen-size-x = <1080>;
index 5f89fb635a1b3dd409390931571336c9ca9d4137..f97fd8ab5e4594fde9014e3f4614f41069a01f07 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 
 - compatible : should be "ingenic,<socname>-intc". Valid strings are:
     ingenic,jz4740-intc
+    ingenic,jz4725b-intc
     ingenic,jz4770-intc
     ingenic,jz4775-intc
     ingenic,jz4780-intc
index 1099fe0788fae19c27dd1153e6d9d9e4aba10c6f..f246ccbf8838c2c90496572af8aa4e4d17079be1 100644 (file)
@@ -15,7 +15,7 @@ Required properties:
   include "nvidia,tegra30-ictlr".      
 - reg : Specifies base physical address and size of the registers.
   Each controller must be described separately (Tegra20 has 4 of them,
-  whereas Tegra30 and later have 5"  
+  whereas Tegra30 and later have 5).
 - interrupt-controller : Identifies the node as an interrupt controller.
 - #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. The value must be 3.
index 20f121daa9106f177a9224ecd12140e6d5f30781..697ca2f26d1b5d8b0d649709234958a2d0f3ada4 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
     - "renesas,irqc-r8a73a4" (R-Mobile APE6)
     - "renesas,irqc-r8a7743" (RZ/G1M)
     - "renesas,irqc-r8a7745" (RZ/G1E)
+    - "renesas,irqc-r8a77470" (RZ/G1C)
     - "renesas,irqc-r8a7790" (R-Car H2)
     - "renesas,irqc-r8a7791" (R-Car M2-W)
     - "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
     - "renesas,intc-ex-r8a77970" (R-Car V3M)
+    - "renesas,intc-ex-r8a77980" (R-Car V3H)
     - "renesas,intc-ex-r8a77995" (R-Car D3)
 - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
   interrupts.txt in this directory
index 136bd612bd8359488447b9ae12b335ffb083ba80..6a36bf66d932d42320cfc6b3c998b16488609d61 100644 (file)
@@ -12,7 +12,7 @@ Required properties:
   specifier, shall be 2
 - interrupts: interrupts references to primary interrupt controller
   (only needed for exti controller with multiple exti under
-  same parent interrupt: st,stm32-exti and st,stm32h7-exti")
+  same parent interrupt: st,stm32-exti and st,stm32h7-exti)
 
 Example:
 
index 356c29789cf54862e1ece93dc40449e221481304..3a66d3c483e1aad12298fcf297767931db09051a 100644 (file)
@@ -152,7 +152,7 @@ Required properties:
 - compatible   : should contain one of:
                  "brcm,bcm7425-timers"
                  "brcm,bcm7429-timers"
-                 "brcm,bcm7435-timers and
+                 "brcm,bcm7435-timers" and
                  "brcm,brcmstb-timers"
 - reg          : the timers register range
 - interrupts   : the interrupt line for this timer block
index df873d1f3b7c598b6c30721d3eec915a20ea8621..f8c33890bc2970e08bf44934835a9b8c464675f1 100644 (file)
@@ -238,7 +238,7 @@ PROPERTIES
                Must include one of the following:
                - "fsl,fman-dtsec" for dTSEC MAC
                - "fsl,fman-xgec" for XGEC MAC
-               - "fsl,fman-memac for mEMAC MAC
+               - "fsl,fman-memac" for mEMAC MAC
 
 - cell-index
                Usage: required
index cafe2197dad95f3d273d5c1a107db1c48d1962ee..c3a29c5feea3792a23922ff55ab1260bf7cda172 100644 (file)
@@ -3,7 +3,7 @@
 Required properties:
 - compatible: "qca,ar7100-usb-phy"
 - #phys-cells: should be 0
-- reset-names: "usb-phy"[, "usb-suspend-override"]
+- reset-names: "phy"[, "suspend-override"]
 - resets: references to the reset controllers
 
 Example:
@@ -11,7 +11,7 @@ Example:
        usb-phy {
                compatible = "qca,ar7100-usb-phy";
 
-               reset-names = "usb-phy", "usb-suspend-override";
+               reset-names = "phy", "suspend-override";
                resets = <&rst 4>, <&rst 3>;
 
                #phy-cells = <0>;
index 9b387f861aed166bda522f6e3d4ebb8856a49218..7dec508987c75c70ac194876df9d4f8019586aab 100644 (file)
@@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a
 node with the label "power".
 In the second example the consumer device are partitioned across two PM domains,
 the first with index 0 and the second with index 1, of a power controller that
-is represented by a node with the label "power.
+is represented by a node with the label "power".
 
 Optional properties:
 - required-opps: This contains phandle to an OPP node in another device's OPP
index ca69f5e3040cfa48299682dd6371f99c90b49ffa..ae326f26359740bce4fe7ac119288447649b6429 100644 (file)
@@ -16,7 +16,7 @@ Required properties:
 Optional properties:
 - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled then this property should be there.
-- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled and if it is from GPIO then GPIO
   number should be provided. If it is externally controlled and no GPIO
   entry then driver will just configure this rails as external control
index a21658f18fe6d7d593adece10e056e072fa2c5e4..3661e6153a92bf8df66cea43d5f41415cc497786 100644 (file)
@@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset
 controller binding usage.
 
 Required properties:
-- compatible: Should be st,stih407-softreset";
+- compatible: Should be "st,stih407-softreset";
 - #reset-cells: 1, see below
 
 example:
index d330c73de9a2e0103aabc3cf365d02974faee73c..68b7d6207e3d75acd51400da27e5ca292c5026d0 100644 (file)
@@ -39,7 +39,7 @@ Required properties:
 
 Optional property:
 - clock-frequency:     Desired I2C bus clock frequency in Hz.
-                       When missing default to 400000Hz.
+                       When missing default to 100000Hz.
 
 Child nodes should conform to I2C bus binding as described in i2c.txt.
 
index 6a4aadc4ce06b27ff059c64f6c438d0fef863b21..84b28dbe9f15452bbe341f3dbf5e6f5452b72a19 100644 (file)
@@ -30,7 +30,7 @@ Required properties:
 
                          Board connectors:
                          * Headset Mic
-                         * Secondary Mic",
+                         * Secondary Mic
                          * DMIC
                          * Ext Spk
 
index aa54e49fc8a26b397232f5ee340b7472f0b57a1c..c7600a93ab39e58bb62cc02e1f77a2d5132f1b08 100644 (file)
@@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
                        "Digital Mic3"
 
                Audio pins and MicBias on WCD9335 Codec:
-                       "MIC_BIAS1
+                       "MIC_BIAS1"
                        "MIC_BIAS2"
                        "MIC_BIAS3"
                        "MIC_BIAS4"
index b1fe7e9de1b47b056778d1bafe69e06bac1534c4..18d4d0166c76bff5dc35a3eccdd94781a68ed494 100644 (file)
@@ -1,19 +1,25 @@
-Mediatek MT6577, MT6572 and MT6589 Timers
----------------------------------------
+Mediatek Timers
+---------------
+
+Mediatek SoCs have two different timers on different platforms:
+- GPT (General Purpose Timer)
+- SYST (System Timer)
+
+The proper timer will be selected automatically by the driver.
 
 Required properties:
 - compatible should contain:
-       * "mediatek,mt2701-timer" for MT2701 compatible timers
-       * "mediatek,mt6580-timer" for MT6580 compatible timers
-       * "mediatek,mt6589-timer" for MT6589 compatible timers
-       * "mediatek,mt7623-timer" for MT7623 compatible timers
-       * "mediatek,mt8127-timer" for MT8127 compatible timers
-       * "mediatek,mt8135-timer" for MT8135 compatible timers
-       * "mediatek,mt8173-timer" for MT8173 compatible timers
-       * "mediatek,mt6577-timer" for MT6577 and all above compatible timers
-- reg: Should contain location and length for timers register.
-- clocks: Clocks driving the timer hardware. This list should include two
-       clocks. The order is system clock and as second clock the RTC clock.
+       * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
+       * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
+       * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
+       * "mediatek,mt7623-timer" for MT7623 compatible timers (GPT)
+       * "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
+       * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
+       * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+       * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
+       * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+- reg: Should contain the location and length of the timer registers.
+- clocks: Should contain the system clock.
 
 Examples:
 
@@ -21,5 +27,5 @@ Examples:
                compatible = "mediatek,mt6577-timer";
                reg = <0x10008000 0x80>;
                interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
-               clocks = <&system_clk>, <&rtc_clk>;
+               clocks = <&system_clk>;
        };
index 252a05c5d976d56b039bcf04069ec9c6e9595dea..c8c4b00ecb941fe85144fb3efd8c5cfa4ec0e5e4 100644 (file)
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt     - Type-C PHY
 
 Example device nodes:
 
index 6e09c35d9f1a281a0046ed2c07dfce1f1312f48f..37091902a0210328e76582426eaec0eaa3a7ae3d 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 
 Examples:
 
-       onewire@0 {
+       onewire {
                compatible = "w1-gpio";
                gpios = <&gpio 126 0>, <&gpio 105 0>;
        };
index bee1b9a1702f1cc6c89811aff6b8bdbc1eefb0b0..6172f3cc3d0b2109916cfccda2da1836065f8766 100644 (file)
@@ -49,10 +49,10 @@ Device Drivers Base
 Device Drivers DMA Management
 -----------------------------
 
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
    :export:
 
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
    :export:
 
 Device drivers PnP support
index dbdf629077037acd99fe18bdced8f71b7eb54639..c7858dd1ea8f566032c8f68dcb35d397b7e747d1 100644 (file)
@@ -5,10 +5,10 @@
 #
 # Architecture requirements
 #
-# * arm64
+# * arm/arm64
 #
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
 #
 # * x86
 #
@@ -31,7 +31,7 @@
     -----------------------
     |       alpha: | TODO |
     |         arc: | TODO |
-    |         arm: | TODO |
+    |         arm: |  ok  |
     |       arm64: |  ok  |
     |         c6x: | TODO |
     |       h8300: | TODO |
index 2c391338c6757f505eac6dfcbe98a169452ad305..7cca82ed2ed1a321b2b52a2d408c6164c58aec20 100644 (file)
@@ -64,7 +64,7 @@ prototypes:
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *,
                                struct file *, unsigned open_flag,
-                               umode_t create_mode, int *opened);
+                               umode_t create_mode);
        int (*tmpfile) (struct inode *, struct dentry *, umode_t);
 
 locking rules:
@@ -441,8 +441,6 @@ prototypes:
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -473,7 +471,7 @@ prototypes:
 };
 
 locking rules:
-       All except for ->poll_mask may block.
+       All may block.
 
 ->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you
@@ -505,9 +503,6 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
-->poll_mask can be called with or without the waitqueue lock for the waitqueue
-returned from ->get_poll_head.
-
 --------------------------- dquot_operations -------------------------------
 prototypes:
        int (*write_dquot) (struct dquot *);
index 9f4f87e1624036349533adf9534bfd3c4b08535d..75865da2ce1475c27bea1050b3e80ff7160f6d6f 100644 (file)
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
 Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
 Shirish Pargaonkar (for many ACL patches over the years)
 Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
 
 
 Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
 bugs in error paths.  Valuable suggestions also have come from Al Viro
 and Dave Miller.
 
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams, and to the SuSE, Citrix and RedHat testers, for finding multiple bugs during excellent stress test runs.
index bc0025cdd1c9c0d285c32e8d7656103868126ca8..455e1cc494a9f2e78ee1d45b89bfbe5ee55048eb 100644 (file)
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
 Version 1.62
 ------------
 Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
index c5adf149b57f7f8f6e2d0b104d5b74f6bc7f5f84..852499aed64b52bb321c0b9656b0b606a4710772 100644 (file)
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
    - multichannel (started), integration with RDMA
-   - directory leases (improved metadata caching)
-   - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+   - directory leases (improved metadata caching), started (root dir only)
+   - T10 copy offload, i.e. "ODX" (copy chunk, and "Duplicate Extents" ioctl
      currently the only two server side copy mechanisms supported)
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases; currently only the root file handle is cached longer
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
 exists. Also better integration with winbind for resolving SID owners
 
 k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
 
 l) encrypted file support
 
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 secure) CIFS dialect can be disabled in environments that don't need it
 and simplify the code.
 
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
 
 KNOWN BUGS
 ====================================
@@ -92,8 +93,8 @@ Misc testing to do
 1) check out max path names and max path name components against various server
 types. Try nested symlinks (8 deep). Return max path name in stat -f information
 
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
 
 3) Additional performance testing and optimization using iozone and similar - 
 there are some easy changes that can be done to parallelize sequential writes,
index 17bb4dc28fae03371c328cc2135b576c4235ab03..7b7b845c490a4b187183b5acc7dead9e0141c45d 100644 (file)
@@ -602,3 +602,23 @@ in your dentry operations instead.
        dentry separately, and it now has request_mask and query_flags arguments
        to specify the fields and sync type requested by statx.  Filesystems not
        supporting any statx-specific features may ignore the new arguments.
+--
+[mandatory]
+       ->atomic_open() calling conventions have changed.  Gone is int *opened,
+       along with FILE_OPENED/FILE_CREATED.  In place of those we have
+       FMODE_OPENED/FMODE_CREATED, set in file->f_mode.  Additionally, return
+       value for 'called finish_no_open(), open it yourself' case has become
+       0, not 1.  Since finish_no_open() itself is returning 0 now, that part
+       does not need any changes in ->atomic_open() instances.
+--
+[mandatory]
+       alloc_file() has become static now; two wrappers are to be used instead.
+       alloc_file_pseudo(inode, vfsmount, name, flags, ops) is for the cases
+       when dentry needs to be created; that's the majority of old alloc_file()
+       users.  Calling conventions: on success a reference to the new struct file
+       is returned and the caller's reference to the inode is subsumed by that.  On
+       failure, ERR_PTR() is returned and no caller's references are affected,
+       so the caller needs to drop the inode reference it held.
+       alloc_file_clone(file, flags, ops) does not affect any caller's references.
+       On success you get a new struct file sharing the mount/dentry with the
+       original, on failure - ERR_PTR().
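
	A sketch of the common conversion under these conventions (the
	function name and "[example]" are illustrative, not from the patch):

		struct file *example_make_file(struct inode *inode,
					       struct vfsmount *mnt,
					       const struct file_operations *fops)
		{
			struct file *file;

			file = alloc_file_pseudo(inode, mnt, "[example]",
						 O_RDWR, fops);
			if (IS_ERR(file))
				iput(inode);	/* failure leaves the caller's
						   inode reference to drop */
			return file;
		}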
index 829a7b7857a46904cfb7f02646212504a3a7f259..85907d5b9c2c5877136d97900a257e2987109c2c 100644 (file)
@@ -386,7 +386,7 @@ struct inode_operations {
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *, struct file *,
-                       unsigned open_flag, umode_t create_mode, int *opened);
+                       unsigned open_flag, umode_t create_mode);
        int (*tmpfile) (struct inode *, struct dentry *, umode_t);
 };
 
@@ -496,13 +496,15 @@ otherwise noted.
 
   atomic_open: called on the last component of an open.  Using this optional
        method the filesystem can look up, possibly create and open the file in
-       one atomic operation.  If it cannot perform this (e.g. the file type
-       turned out to be wrong) it may signal this by returning 1 instead of
-       usual 0 or -ve .  This method is only called if the last component is
-       negative or needs lookup.  Cached positive dentries are still handled by
-       f_op->open().  If the file was created, the FILE_CREATED flag should be
-       set in "opened".  In case of O_EXCL the method must only succeed if the
-       file didn't exist and hence FILE_CREATED shall always be set on success.
+       one atomic operation.  If it wants to leave actual opening to the
+       caller (e.g. if the file turned out to be a symlink, device, or just
+       something the filesystem won't do atomic open for), it may signal this by
+       returning finish_no_open(file, dentry).  This method is only called if
+       the last component is negative or needs lookup.  Cached positive dentries
+       are still handled by f_op->open().  If the file was created,
+       the FMODE_CREATED flag should be set in file->f_mode.  In case of O_EXCL
+       the method must only succeed if the file didn't exist and hence FMODE_CREATED
+       shall always be set on success.
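
	A skeletal ->atomic_open() under these conventions (all examplefs_*
	helpers are hypothetical; only the FMODE_CREATED handling and the
	finish_no_open()/finish_open() returns reflect the documented
	interface):

		static int examplefs_atomic_open(struct inode *dir,
					struct dentry *dentry, struct file *file,
					unsigned open_flag, umode_t create_mode)
		{
			bool created = false;
			struct dentry *res;

			res = examplefs_lookup_or_create(dir, dentry, open_flag,
							 create_mode, &created);
			if (IS_ERR(res))
				return PTR_ERR(res);
			if (created)
				file->f_mode |= FMODE_CREATED;
			if (!examplefs_supports_atomic_open(res))
				return finish_no_open(file, res); /* returns 0 */
			return finish_open(file, res, examplefs_do_open);
		}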
 
   tmpfile: called in the end of O_TMPFILE open().  Optional, equivalent to
        atomically creating, opening and unlinking a file in given directory.
@@ -857,8 +859,6 @@ struct file_operations {
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -903,17 +903,6 @@ otherwise noted.
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
 
-  get_poll_head: Returns the struct wait_queue_head that callers can
-  wait on.  Callers need to check the returned events using ->poll_mask
-  once woken.  Can return NULL to indicate polling is not supported,
-  or any error code using the ERR_PTR convention to indicate that a
-  grave error occured and ->poll_mask shall not be called.
-
-  poll_mask: return the mask of EPOLL* values describing the file descriptor
-  state.  Called either before going to sleep on the waitqueue returned by
-  get_poll_head, or after it has been woken.  If ->get_poll_head and
-  ->poll_mask are implemented ->poll does not need to be implement.
-
   unlocked_ioctl: called by the ioctl(2) system call.
 
   compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
index 9ba6587b76573e5cfd0d0fa4c05dcf2c27e95e05..b2de8fa49273f3a5f15fd00cc23052ee093d71cb 100644 (file)
@@ -16,6 +16,11 @@ Supported chips:
     Prefixes: 'max34446'
     Addresses scanned: -
     Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+  * Maxim MAX34451
+    PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+    Prefixes: 'max34451'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
   * Maxim MAX34460
     PMBus 12-Channel Voltage Monitor & Sequencer
     Prefix: 'max34460'
@@ -36,9 +41,10 @@ Description
 This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
 Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
 and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
-It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
-The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
-channels.
+It also supports the MAX34451, MAX34460, and MAX34461 PMBus Voltage Monitor &
+Sequencers. The MAX34451 supports monitoring voltage or current of 12 channels
+based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
+supports 16 voltage channels.
 
 The driver is a client driver to the core PMBus driver. Please see
 Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -93,7 +99,7 @@ curr[1-6]_max         Maximum current. From IOUT_OC_WARN_LIMIT register.
 curr[1-6]_crit         Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
 curr[1-6]_max_alarm    Current high alarm. From IOUT_OC_WARNING status.
 curr[1-6]_crit_alarm   Current critical high alarm. From IOUT_OC_FAULT status.
-curr[1-4]_average      Historical average current (MAX34446 only).
+curr[1-4]_average      Historical average current (MAX34446/34451 only).
 curr[1-6]_highest      Historical maximum current.
 curr[1-6]_reset_history        Write any value to reset history.
 
@@ -123,5 +129,7 @@ temp[1-8]_reset_history     Write any value to reset history.
                        temp7 and temp8 attributes only exist for MAX34440.
                        MAX34446 only supports temp[1-3].
 
+MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
+and temp[1-5].
 MAX34460 supports attribute groups in[1-12] and temp[1-5].
 MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan
new file mode 100644 (file)
index 0000000..fc531c6
--- /dev/null
@@ -0,0 +1,60 @@
+Kernel driver mlxreg-fan
+========================
+
+Provides FAN control for the following Mellanox systems:
+QMB700, equipped with 40x200GbE InfiniBand ports;
+MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+MSN3800, equipped with 64x1000GbE Ethernet ports;
+These are Top-of-Rack systems, equipped with a Mellanox switch board
+carrying Mellanox Quantum or Spectrum-2 devices.
+The FAN controller is implemented in the programmable device logic.
+
+The default register offsets within the programmable device are as
+follows:
+- pwm1                 0xe3
+- fan1 (tacho1)                0xe4
+- fan2 (tacho2)                0xe5
+- fan3 (tacho3)                0xe6
+- fan4 (tacho4)                0xe7
+- fan5 (tacho5)                0xe8
+- fan6 (tacho6)                0xe9
+- fan7 (tacho7)                0xea
+- fan8 (tacho8)                0xeb
+- fan9 (tacho9)                0xec
+- fan10 (tacho10)      0xed
+- fan11 (tacho11)      0xee
+- fan12 (tacho12)      0xef
+This setup can be re-programmed to use other registers.
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+The driver implements a simple interface for driving a fan connected to
+a PWM output and tachometer inputs.
+The driver obtains the PWM and tachometer register locations from the
+system configuration and creates fan/PWM hwmon objects and a cooling
+device. PWM and tachometers are accessed through the on-board
+programmable device, which exports its register map. This device can be
+attached to any bus type for which register mapping is supported.
+A single instance is created with one PWM control, up to 12 tachometers,
+and one cooling device. There can be as many instances as the
+programmable device supports.
+The driver exposes the fans to user space through the hwmon and thermal
+sysfs interfaces.
+
+/sys files in hwmon subsystem
+-----------------------------
+
+fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1           - RW file for fan[1-12] target duty cycle (0..255)
+
+/sys files in thermal subsystem
+-------------------------------
+
+cur_state      - RW file for current cooling state of the cooling device
+                 (0..max_state)
+max_state      - RO file for maximum cooling state of the cooling device
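+
+For example, the fan speed can be read and the cooling state set through
+sysfs (hwmon and cooling device instance numbers are hypothetical):
+
+# cat /sys/class/hwmon/hwmon2/fan1_input
+# echo 5 > /sys/class/thermal/cooling_device0/cur_state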
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan
new file mode 100644 (file)
index 0000000..6156ef7
--- /dev/null
@@ -0,0 +1,22 @@
+Kernel driver npcm750-pwm-fan
+=============================
+
+Supported chips:
+       NUVOTON NPCM750/730/715/705
+
+Authors:
+       <tomer.maimon@nuvoton.com>
+
+Description:
+------------
+This driver implements support for the NUVOTON NPCM7XX PWM and fan tacho
+controller. The PWM controller supports up to 8 PWM outputs. The fan tacho
+controller supports up to 16 tachometer inputs.
+
+The driver provides the following sensor accesses in sysfs:
+
+fanX_input     ro      provides the current fan rotation speed in RPM as
+                       reported by the fan to the device.
+
+pwmX           rw      gets or sets the PWM fan control value, an integer
+                       between 0 (off) and 255 (full speed).
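+
+For example, reading a fan speed and setting a PWM duty cycle (hwmon
+instance number is hypothetical):
+
+# cat /sys/class/hwmon/hwmon0/fan1_input
+# echo 255 > /sys/class/hwmon/hwmon0/pwm1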
index fc337c317c67353a80afc3a88997e2ead759c340..2b9e1005d88b7b14f6725437a5e77b870797bbe4 100644 (file)
@@ -171,6 +171,13 @@ in[0-*]_label      Suggested voltage channel label.
                user-space.
                RO
 
+in[0-*]_enable
+               Enable or disable the sensors.
+               When disabled, the sensor read will return -ENODATA.
+               1: Enable
+               0: Disable
+               RW
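+               For example, a sensor could be disabled and re-enabled
+               at runtime (hwmon path hypothetical):
+               # echo 0 > /sys/class/hwmon/hwmon0/in0_enable
+               # echo 1 > /sys/class/hwmon/hwmon0/in0_enable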
+
 cpu[0-*]_vid   CPU core reference voltage.
                Unit: millivolt
                RO
@@ -236,6 +243,13 @@ fan[1-*]_label     Suggested fan channel label.
                In all other cases, the label is provided by user-space.
                RO
 
+fan[1-*]_enable
+               Enable or disable the sensors.
+               When disabled, the sensor read will return -ENODATA.
+               1: Enable
+               0: Disable
+               RW
+
 Also see the Alarms section for status flags associated with fans.
 
 
@@ -409,6 +423,13 @@ temp_reset_history
                Reset temp_lowest and temp_highest for all sensors
                WO
 
+temp[1-*]_enable
+               Enable or disable the sensors.
+               When disabled, the sensor read will return -ENODATA.
+               1: Enable
+               0: Disable
+               RW
+
 Some chips measure temperature using external thermistors and an ADC, and
 report the temperature measurement as a voltage. Converting this voltage
 back to a temperature (or the other way around for limits) requires
@@ -468,6 +489,13 @@ curr_reset_history
                Reset currX_lowest and currX_highest for all sensors
                WO
 
+curr[1-*]_enable
+               Enable or disable the sensors.
+               When disabled, the sensor read will return -ENODATA.
+               1: Enable
+               0: Disable
+               RW
+
 Also see the Alarms section for status flags associated with currents.
 
 *********
@@ -566,6 +594,13 @@ power[1-*]_crit                    Critical maximum power.
                                Unit: microWatt
                                RW
 
+power[1-*]_enable              Enable or disable the sensors.
+                               When disabled, the sensor read will return
+                               -ENODATA.
+                               1: Enable
+                               0: Disable
+                               RW
+
 Also see the Alarms section for status flags associated with power readings.
 
 **********
@@ -576,6 +611,12 @@ energy[1-*]_input          Cumulative energy use
                                Unit: microJoule
                                RO
 
+energy[1-*]_enable             Enable or disable the sensors.
+                               When disabled, the sensor read will return
+                               -ENODATA.
+                               1: Enable
+                               0: Disable
+                               RW
 
 ************
 * Humidity *
@@ -586,6 +627,13 @@ humidity[1-*]_input                Humidity
                                RO
 
 
+humidity[1-*]_enable           Enable or disable the sensors.
+                               When disabled, the sensor read will return
+                               -ENODATA.
+                               1: Enable
+                               0: Disable
+                               RW
+
 **********
 * Alarms *
 **********
index 6c9c69ec3986be379a86f745f30a9eab9b817d96..114c7ce7b58de2c15e5b1c917c96e4926c605191 100644 (file)
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable.  The default name is "Kconfig".
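+
+For example, a sketch of pointing the build at an alternative top-level
+file (file name hypothetical):
+
+    make KBUILD_KCONFIG=Kconfig.custom menuconfig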
+
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
 directory name found in the arch/ directory.
 But some architectures such as x86 and sparc have aliases.
 x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
 
 CROSS_COMPILE
 --------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used.  Otherwise,
 INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-    $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
index 3534a84d206caf324423a9422eb985b48c97813b..64e0775a62d4475ec378d033332b5099987954ff 100644 (file)
@@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses
 to use it. It should be placed at the top of the configuration, before any
 other statement.
 
+'#' Kconfig source file comment:
+
+An unquoted '#' character anywhere in a source file line indicates
+the beginning of a source file comment.  The remainder of that line
+is a comment.
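+
+For example (symbol name hypothetical):
+
+  # This whole line is a comment.
+  config FOO
+          bool "foo option"   # trailing comment; a '#' inside quotes is literal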
+
 
 Kconfig hints
 -------------
index 7233118f3a05481247f4c550542b099e9f655245..68c82914c0f3a1e791cab09d7b6a2b7253541443 100644 (file)
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
 
 Use "make help" to list all of the possible configuration targets.
 
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text.  Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text.  Be sure to check that for
+navigation, search, and other general help text.
 
 ======================================================================
 General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
 for you, so you may find that you need to see what NEW kernel
 symbols have been introduced.
 
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
 
        cp user/some/old.config .config
        make listnewconfig
 
 and the config program will list any new symbols, one per line.
 
+Alternatively, you can use the brute force method:
+
+       make oldconfig
        scripts/diffconfig .config.old .config | less
 
 ______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
                This lists all config symbols that contain "hotplug",
                e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
 
-       For search help, enter / followed TAB-TAB-TAB (to highlight
+       For search help, enter / followed by TAB-TAB (to highlight
        <Help>) and Enter.  This will tell you that you can also use
        regular expressions (regexes) in the search string, so if you
        are not interested in MEMORY_HOTPLUG, you could try
@@ -202,6 +205,39 @@ Example:
        make MENUCONFIG_MODE=single_menu menuconfig
 
 
+======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator.  It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window.  E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+       You can search either in the menu entry "prompt" strings
+       or in the configuration symbols.
+
+       Use / to begin a search through the menu entries.  This does
+       not support regular expressions.  Use <Down> or <Up> for
+       Next hit and Previous hit, respectively.  Use <Esc> to
+       terminate the search mode.
+
+       F8 (SymSearch) searches the configuration symbols for the
+       given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+This mode shows all sub-menus in one large tree.
+
+Example:
+       make NCONFIG_MODE=single_menu nconfig
+
+
 ======================================================================
 xconfig
 --------------------------------------------------
@@ -230,8 +266,7 @@ gconfig
 
 Searching in gconfig:
 
-       None (gconfig isn't maintained as well as xconfig or menuconfig);
-       however, gconfig does have a few more viewing choices than
-       xconfig does.
+       There is no search command in gconfig.  However, gconfig does
+       have several different viewing choices, modes, and options.
 
 ###
index cb3b0de83fc6db83d9a5739bdaa0eb199ae5250d..10f4499e677c0863475c8583edfe83a224bc2bae 100644 (file)
@@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the
 "post_handler," if any, that is associated with the kprobe.
 Execution then continues with the instruction following the probepoint.
 
+Changing Execution Path
+-----------------------
+
+Since kprobes can probe running kernel code, it can change the
+register set, including the instruction pointer. This operation
+requires maximum care, such as keeping the stack frame and recovering
+the execution path. Since it operates on a running kernel and needs
+deep knowledge of computer architecture and concurrent computing, it
+is easy to shoot yourself in the foot.
+
+If you change the instruction pointer (and set up other related
+registers) in a pre_handler, you must return !0 so that kprobes stops
+single-stepping and simply returns to the given address.
+This also means that the post_handler will not be called.
+
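+As a minimal sketch (x86-64 specific; the replacement function is
+hypothetical), such a pre_handler could look like::
+
+        static int divert_pre(struct kprobe *p, struct pt_regs *regs)
+        {
+                /* Redirect execution to a replacement function. */
+                regs->ip = (unsigned long)my_replacement_func;
+
+                /* Non-zero: skip single-stepping, skip the post_handler. */
+                return 1;
+        }
+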
+Note that this operation may be harder on some architectures which use
+a TOC (Table of Contents) for function calls, since you have to set up
+a new TOC for your function in your module, and recover the old one
+after returning from it.
+
 Return Probes
 -------------
 
@@ -262,7 +282,7 @@ is optimized, that modification is ignored.  Thus, if you want to
 tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
 
 or
 
@@ -474,7 +494,7 @@ error occurs during registration, all probes in the array, up to
 the bad probe, are safely unregistered before the register_*probes
 function returns.
 
-- kps/rps/jps: an array of pointers to ``*probe`` data structures
+- kps/rps: an array of pointers to ``*probe`` data structures
 - num: the number of the array entries.
 
 .. note::
@@ -566,12 +586,11 @@ the same handler) may run concurrently on different CPUs.
 Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
-Probe handlers are run with preemption disabled.  Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64).  In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+Probe handlers are run with either preemption or interrupts disabled,
+depending on the architecture and optimization state (e.g., kretprobe
+handlers and optimized kprobe handlers run with interrupts enabled on
+x86/x86-64).  In any case, your handler should not yield the CPU (e.g.,
+by attempting to acquire a semaphore or waiting for I/O).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls
index a02d6bbfc9d0ae1958d3928ba1be441d3a60fa0a..0d8d7ef131e9aa1782c1f591624723e483a5bd04 100644 (file)
@@ -2179,32 +2179,41 @@ or:
        event_indicated = 1;
        wake_up_process(event_daemon);
 
-A write memory barrier is implied by wake_up() and co.  if and only if they
-wake something up.  The barrier occurs before the task state is cleared, and so
-sits between the STORE to indicate the event and the STORE to set TASK_RUNNING:
+A general memory barrier is executed by wake_up() if it wakes something up.
+If it doesn't wake anything up then a memory barrier may or may not be
+executed; you must not rely on it.  The barrier occurs before the task state
+is accessed; in particular, it sits between the STORE to indicate the event
+and the STORE to set TASK_RUNNING:
 
-       CPU 1                           CPU 2
+       CPU 1 (Sleeper)                 CPU 2 (Waker)
        =============================== ===============================
        set_current_state();            STORE event_indicated
          smp_store_mb();               wake_up();
-           STORE current->state          <write barrier>
-           <general barrier>             STORE current->state
-       LOAD event_indicated
+           STORE current->state          ...
+           <general barrier>             <general barrier>
+       LOAD event_indicated              if ((LOAD task->state) & TASK_NORMAL)
+                                           STORE task->state
 
-To repeat, this write memory barrier is present if and only if something
-is actually awakened.  To see this, consider the following sequence of
-events, where X and Y are both initially zero:
+where "task" is the thread being woken up and is equal to CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by wake_up()
+if something is actually awakened, but otherwise there is no such guarantee.
+To see this, consider the following sequence of events, where X and Y are both
+initially zero:
 
        CPU 1                           CPU 2
        =============================== ===============================
-       X = 1;                          STORE event_indicated
+       X = 1;                          Y = 1;
        smp_mb();                       wake_up();
-       Y = 1;                          wait_event(wq, Y == 1);
-       wake_up();                        load from Y sees 1, no memory barrier
-                                       load from X might see 0
+       LOAD Y                          LOAD X
+
+If a wakeup does occur, at least one of the two loads must see 1.  If, on
+the other hand, a wakeup does not occur, both loads might see 0.
 
-In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
-to see 1.
+wake_up_process() always executes a general memory barrier.  The barrier again
+occurs before the task state is accessed.  In particular, if the wake_up() in
+the previous snippet were replaced by a call to wake_up_process() then one of
+the two loads would be guaranteed to see 1.
 
 The available waker functions include:
 
@@ -2224,6 +2233,8 @@ The available waker functions include:
        wake_up_poll();
        wake_up_process();
 
+In terms of memory ordering, these functions all provide the same guarantees as
+a wake_up() (or stronger).
 
 [!] Note that the memory barriers implied by the sleeper and the waker do _not_
 order multiple stores before the wake-up with respect to loads of those stored
index c13214d073a4866f49025033a86fed03275bca5f..d3e5dd26db12d75bc09d25cacbf0f775003cd527 100644 (file)
@@ -1490,7 +1490,7 @@ To remove an ARP target:
 
 To configure the interval between learning packet transmits:
 # echo 12 > /sys/class/net/bond0/bonding/lp_interval
-       NOTE: the lp_inteval is the number of seconds between instances where
+       NOTE: the lp_interval is the number of seconds between instances where
 the bonding driver sends learning packets to each slaves peer switch.  The
 default interval is 1 second.
 
index 79fede4447d616adbb6fb9b3e4f87c01ad8ac831..d638b5a8aadd4da77c24849ebfc2f862ed0bb9c6 100644 (file)
@@ -1,5 +1,6 @@
 .. include:: <isonum.txt>
 
+=========================================================
 DPAA2 (Data Path Acceleration Architecture Gen2) Overview
 =========================================================
 
index d4d8370279254472ed812488ea61d4e3bd64651b..f81111eba9c5dd157aecdd1f1370031d3d1b1f0f 100644 (file)
@@ -1,3 +1,4 @@
+==============================================================
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================
 
@@ -46,123 +47,131 @@ Driver Configuration Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-Rx Descriptors: Number of receive descriptors. A receive descriptor is a data
+Rx Descriptors:
+   Number of receive descriptors. A receive descriptor is a data
    structure that describes a receive buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to write
    data from the controller to host memory. In the 3.x.x driver the valid range
    for this parameter is 64-256. The default value is 256. This parameter can be
    changed using the command::
 
-   ethtool -G eth? rx n
+     ethtool -G eth? rx n
 
    Where n is the number of desired Rx descriptors.
 
-Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
+Tx Descriptors:
+   Number of transmit descriptors. A transmit descriptor is a data
    structure that describes a transmit buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to read
    data from the host memory to the controller. In the 3.x.x driver the valid
    range for this parameter is 64-256. The default value is 128. This parameter
    can be changed using the command::
 
-   ethtool -G eth? tx n
+     ethtool -G eth? tx n
 
    Where n is the number of desired Tx descriptors.
 
-Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
+Speed/Duplex:
+   The driver auto-negotiates the link speed and duplex settings by
    default. The ethtool utility can be used as follows to force speed/duplex.::
 
-   ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
+     ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
 
    NOTE: setting the speed/duplex to incorrect values will cause the link to
    fail.
 
-Event Log Message Level:  The driver uses the message level flag to log events
+Event Log Message Level:
+   The driver uses the message level flag to log events
    to syslog. The message level can be set at driver load time. It can also be
    set using the command::
 
-   ethtool -s eth? msglvl n
+     ethtool -s eth? msglvl n
 
 
 Additional Configurations
 =========================
 
-  Configuring the Driver on Different Distributions
-  -------------------------------------------------
+Configuring the Driver on Different Distributions
+-------------------------------------------------
 
-  Configuring a network driver to load properly when the system is started is
-  distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modprobe.d/*.conf as well as editing other system
-  startup scripts and/or configuration files.  Many popular Linux
-  distributions ship with tools to make these changes for you. To learn the
-  proper way to configure a network device for your system, refer to your
-  distribution documentation.  If during this process you are asked for the
-  driver or module name, the name for the Linux Base Driver for the Intel
-  PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent.  Typically, the configuration process involves
+adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
+system startup scripts and/or configuration files.  Many popular Linux
+distributions ship with tools to make these changes for you.  To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation.  If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.
 
-  As an example, if you install the e100 driver for two PRO/100 adapters
-  (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::
 
        alias eth0 e100
        alias eth1 e100
 
-  Viewing Link Messages
-  ---------------------
-  In order to see link messages and other Intel driver information on your
-  console, you must set the dmesg level up to six. This can be done by
-  entering the following on the command line before loading the e100 driver::
+Viewing Link Messages
+---------------------
+
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six.  This can be done by
+entering the following on the command line before loading the e100
+driver::
 
        dmesg -n 6
 
-  If you wish to see all messages issued by the driver, including debug
-  messages, set the dmesg level to eight.
+If you wish to see all messages issued by the driver, including debug
+messages, set the dmesg level to eight.
 
-  NOTE: This setting is not saved across reboots.
+NOTE: This setting is not saved across reboots.
 
+ethtool
+-------
 
-  ethtool
-  -------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information.  ethtool
+version 1.6 or later is required for this functionality.
 
-  The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  The ethtool
-  version 1.6 or later is required for this functionality.
+The latest release of ethtool can be found at
+https://www.kernel.org/pub/software/network/ethtool/
 
-  The latest release of ethtool can be found from
-  https://www.kernel.org/pub/software/network/ethtool/
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is provided through the ethtool* utility.  For instructions on
+enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
+enabled on the system during the next shut down or reboot.  For this
+driver version, in order to enable WoL, the e100 driver must be loaded
+when shutting down or rebooting the system.
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
-  WoL is provided through the ethtool* utility.  For instructions on enabling
-  WoL with ethtool, refer to the ethtool man page.
+NAPI
+----
 
-  WoL will be enabled on the system during the next shut down or reboot. For
-  this driver version, in order to enable WoL, the e100 driver must be
-  loaded when shutting down or rebooting the system.
+NAPI (Rx polling mode) is supported in the e100 driver.
 
-  NAPI
-  ----
+See https://wiki.linuxfoundation.org/networking/napi for more
+information on NAPI.
 
-  NAPI (Rx polling mode) is supported in the e100 driver.
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
 
-  See https://wiki.linuxfoundation.org/networking/napi for more information
-  on NAPI.
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected.  All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
 
-  Multiple Interfaces on Same Ethernet Broadcast Network
-  ------------------------------------------------------
+If you have multiple interfaces in a server, either turn on ARP
+filtering by
 
-  Due to the default ARP behavior on Linux, it is not possible to have
-  one system on two IP networks in the same Ethernet broadcast domain
-  (non-partitioned switch) behave as expected. All Ethernet interfaces
-  will respond to IP traffic for any IP address assigned to the system.
-  This results in unbalanced receive traffic.
+(1) entering::
 
-  If you have multiple interfaces in a server, either turn on ARP
-  filtering by
+       echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
 
-  (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-      (this only works if your kernel's version is higher than 2.4.5), or
+    (this only works if your kernel's version is higher than 2.4.5), or
 
-  (2) installing the interfaces in separate broadcast domains (either
-      in different switches or in a switch partitioned to VLANs).
+(2) installing the interfaces in separate broadcast domains (either
+    in different switches or in a switch partitioned to VLANs).
 
 
 Support
index 616848940e63f7303633e0be67febc86bee6ac6f..f10dd40869218cb11e1d29bc5e4b6431d30af946 100644 (file)
@@ -1,3 +1,4 @@
+===========================================================
 Linux* Base Driver for Intel(R) Ethernet Network Connection
 ===========================================================
 
@@ -33,7 +34,8 @@ Command Line Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-NOTES:  For more information about the AutoNeg, Duplex, and Speed
+NOTES:
+       For more information about the AutoNeg, Duplex, and Speed
         parameters, see the "Speed and Duplex Configuration" section in
         this document.
 
@@ -44,22 +46,27 @@ NOTES:  For more information about the AutoNeg, Duplex, and Speed
 
 AutoNeg
 -------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0x01-0x0F, 0x20-0x2F
-Default Value: 0x2F
+
+:Valid Range:   0x01-0x0F, 0x20-0x2F
+:Default Value: 0x2F
 
 This parameter is a bit-mask that specifies the speed and duplex settings
 advertised by the adapter.  When this parameter is used, the Speed and
 Duplex parameters must not be specified.
 
-NOTE:  Refer to the Speed and Duplex section of this readme for more
+NOTE:
+       Refer to the Speed and Duplex section of this readme for more
        information on the AutoNeg parameter.
 
 Duplex
 ------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
-Default Value: 0
+
+:Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
+:Default Value: 0
 
 This defines the direction in which data is allowed to flow.  Can be
 either one or two-directional.  If both Duplex and the link partner are
@@ -69,18 +76,22 @@ duplex.
 
 FlowControl
 -----------
-Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default Value: Reads flow control settings from the EEPROM
+
+:Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: Reads flow control settings from the EEPROM
 
 This parameter controls the automatic generation(Tx) and response(Rx)
 to Ethernet PAUSE frames.
 
 InterruptThrottleRate
 ---------------------
+
 (not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
-                                 4=simplified balancing)
-Default Value: 3
+
+:Valid Range:
+   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+   4=simplified balancing)
+:Default Value: 3
 
 The driver can limit the amount of interrupts per second that the adapter
 will generate for incoming packets. It does this by writing a value to the
@@ -134,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
 and may improve small packet latency, but is generally not suitable
 for bulk throughput traffic.
 
-NOTE:  InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+NOTE:
+       InterruptThrottleRate takes precedence over the TxAbsIntDelay and
        RxAbsIntDelay parameters.  In other words, minimizing the receive
        and/or transmit absolute delays does not force the controller to
        generate more interrupts than what the Interrupt Throttle Rate
        allows.
 
-CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
+CAUTION:
+          If you are using the Intel(R) PRO/1000 CT Network Connection
           (controller 82547), setting InterruptThrottleRate to a value
           greater than 75,000, may hang (stop transmitting) adapters
           under certain network conditions.  If this occurs a NETDEV
@@ -150,7 +163,8 @@ CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
           hang, ensure that InterruptThrottleRate is set no greater
           than 75,000 and is not set to 0.
 
-NOTE:  When e1000 is loaded with default settings and multiple adapters
+NOTE:
+       When e1000 is loaded with default settings and multiple adapters
        are in use simultaneously, the CPU utilization may increase non-
        linearly.  In order to limit the CPU utilization without impacting
        the overall throughput, we recommend that you load the driver as
@@ -167,9 +181,11 @@ NOTE:  When e1000 is loaded with default settings and multiple adapters
 
 RxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value specifies the number of receive buffer descriptors allocated
 by the driver.  Increasing this value allows the driver to buffer more
@@ -179,15 +195,17 @@ Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
 on the MTU setting. The maximum MTU size is 16110.
 
-NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo
+NOTE:
+       MTU designates the frame size.  It only needs to be set for Jumbo
        Frames.  Depending on the available system resources, the request
        for a higher number of receive descriptors may be denied.  In this
        case, use a lower number.
 
 RxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 0
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 0
 
 This value delays the generation of receive interrupts in units of 1.024
 microseconds.  Receive interrupt reduction can improve CPU efficiency if
@@ -197,7 +215,8 @@ of TCP traffic.  If the system is reporting dropped receives, this value
 may be set too high, causing the driver to run out of available receive
 descriptors.
 
-CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
+CAUTION:
+          When setting RxIntDelay to a value other than 0, adapters may
           hang (stop transmitting) under certain network conditions.  If
           this occurs a NETDEV WATCHDOG message is logged in the system
           event log.  In addition, the controller is automatically reset,
@@ -206,9 +225,11 @@ CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
 
 RxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 128
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 128
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 receive interrupt is generated.  Useful only if RxIntDelay is non-zero,
@@ -219,9 +240,11 @@ conditions.
 
 Speed
 -----
+
 (This parameter is supported only on adapters with copper connections.)
-Valid Settings: 0, 10, 100, 1000
-Default Value:  0 (auto-negotiate at all supported speeds)
+
+:Valid Settings: 0, 10, 100, 1000
+:Default Value:  0 (auto-negotiate at all supported speeds)
 
 Speed forces the line speed to the specified value in megabits per second
 (Mbps).  If this parameter is not specified or is set to 0 and the link
@@ -230,22 +253,26 @@ speed.  Duplex should also be set when Speed is set to either 10 or 100.
 
 TxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+  - 48-256 for 82542 and 82543-based adapters
+  - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value is the number of transmit descriptors allocated by the driver.
 Increasing this value allows the driver to queue more transmits.  Each
 descriptor is 16 bytes.
 
-NOTE:  Depending on the available system resources, the request for a
+NOTE:
+       Depending on the available system resources, the request for a
        higher number of transmit descriptors may be denied.  In this case,
        use a lower number.
 
 TxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 8
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 8
 
 This value delays the generation of transmit interrupts in units of
 1.024 microseconds.  Transmit interrupt reduction can improve CPU
@@ -255,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
 
 TxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 32
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 32
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 transmit interrupt is generated.  Useful only if TxIntDelay is non-zero,
@@ -268,18 +297,21 @@ network conditions.
 
 XsumRX
 ------
+
 (This parameter is NOT supported on the 82542-based adapter.)
-Valid Range:   0-1
-Default Value: 1
+
+:Valid Range:   0-1
+:Default Value: 1
 
 A value of '1' indicates that the driver should enable IP checksum
 offload for received packets (both UDP and TCP) to the adapter hardware.
 
 Copybreak
 ---------
-Valid Range:   0-xxxxxxx (0=off)
-Default Value: 256
-Usage: modprobe e1000.ko copybreak=128
+
+:Valid Range:   0-xxxxxxx (0=off)
+:Default Value: 256
+:Usage: modprobe e1000.ko copybreak=128
 
 Driver copies all packets below or equaling this size to a fresh RX
 buffer before handing it up the stack.
@@ -291,8 +323,9 @@ it is also available during runtime at
 
 SmartPowerDownEnable
 --------------------
-Valid Range: 0-1
-Default Value:  0 (disabled)
+
+:Valid Range: 0-1
+:Default Value:  0 (disabled)
 
 Allows PHY to turn off in lower power states. The user can turn off
 this parameter in supported chipsets.
@@ -308,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
 
 For copper-based boards, the keywords interact as follows:
 
-  The default operation is auto-negotiate.  The board advertises all
+- The default operation is auto-negotiate.  The board advertises all
   supported speed and duplex combinations, and it links at the highest
   common speed and duplex mode IF the link partner is set to auto-negotiate.
 
-  If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
+- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
   is advertised (The 1000BaseT spec requires auto-negotiation.)
 
-  If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
+- If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
   negotiation is disabled, and the AutoNeg parameter is ignored.  Partner
   SHOULD also be forced.
 
@@ -327,13 +360,15 @@ process.
 The parameter may be specified as either a decimal or hexadecimal value as
 determined by the bitmap below.
 
+============== ====== ====== ======= ======= ====== ====== ======= ======
 Bit position   7      6      5       4       3      2      1       0
 Decimal Value  128    64     32      16      8      4      2       1
 Hex value      80     40     20      10      8      4      2       1
 Speed (Mbps)   N/A    N/A    1000    N/A     100    100    10      10
 Duplex                       Full            Full   Half   Full    Half
+============== ====== ====== ======= ======= ====== ====== ======= ======
 
-Some examples of using AutoNeg:
+Some examples of using AutoNeg::
 
   modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
   modprobe e1000 AutoNeg=1 (Same as above)
@@ -354,8 +389,9 @@ previously mentioned to force the adapter to the same speed and duplex.
 Additional Configurations
 =========================
 
-  Jumbo Frames
-  ------------
+Jumbo Frames
+------------
+
   Jumbo Frames support is enabled by changing the MTU to a value larger than
   the default of 1500.  Use the ifconfig command to increase the MTU size.
   For example::
@@ -367,11 +403,11 @@ Additional Configurations
 
        MTU=9000
 
-   to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
-   applies to the Red Hat distributions; other distributions may store this
-   setting in a different location.
+  to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
+  applies to the Red Hat distributions; other distributions may store this
+  setting in a different location.
 
-  Notes:
+Notes:
   Degradation in throughput performance may be observed in some Jumbo frames
   environments. If this is observed, increasing the application's socket buffer
   size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
@@ -385,12 +421,14 @@ Additional Configurations
     poor performance or loss of link.
 
   - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
-    support Jumbo Frames. These correspond to the following product names:
+    support Jumbo Frames. These correspond to the following product names::
+
      Intel(R) PRO/1000 Gigabit Server Adapter
      Intel(R) PRO/1000 PM Network Connection
 
-  ethtool
-  -------
+ethtool
+-------
+
   The driver utilizes the ethtool interface for driver configuration and
   diagnostics, as well as displaying statistical information.  The ethtool
   version 1.6 or later is required for this functionality.
@@ -398,8 +436,9 @@ Additional Configurations
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
+Enabling Wake on LAN* (WoL)
+---------------------------
+
   WoL is configured through the ethtool* utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
index 13081b3decefa834824b544182d0986e83bc50b4..a7d354ddda7baeb59760215cb41222e3b4698a8d 100644 (file)
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
      Temporarily pause a stream parser. Message parsing is suspended
      and no new messages are delivered to the upper layer.
 
-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)
 
      Unpause a paused stream parser.
 
index e73bcf9cb5f31cc756521702bbc15fd142e09c71..7ffea6aa22e3c89d4b6e6c7359d40a55c4241176 100644 (file)
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
 associated event field will be saved in a variable but won't be summed
 as a value:
 
-  # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+  # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
 
 Multiple variables can be assigned at the same time.  The below would
 result in both ts0 and b being created as variables, with both
 common_timestamp and field1 additionally being summed as values:
 
-  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
        event/trigger
 
 Note that variable assignments can appear either preceding or
 following their use.  The command below behaves identically to the
 command above:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
        event/trigger
 
 Any number of variables not bound to a 'vals=' prefix can also be
 assigned by simply separating them with colons.  Below is the same
 thing but without the values being summed in the histogram:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
 
 Variables set as above can be referenced and used in expressions on
 another event.
 
 For example, here's how a latency can be calculated:
 
-  # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+  # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
 
In the first line above, the event's timestamp is saved into the
 variable ts0.  In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'.  The hist trigger below in turn
 makes use of the wakeup_lat variable to compute a combined latency
 using the same key and variable from yet another event:
 
-  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
 
 2.2.2 Synthetic Events
 ----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
 At this point, there isn't yet an actual 'wakeup_latency' event
instantiated in the event subsystem - for this to happen, a 'hist
 trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using the hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
 
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
 
   # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
         /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
     back to that pid, the timestamp difference is calculated.  If the
     resulting latency, stored in wakeup_lat, exceeds the current
     maximum latency, the values specified in the save() fields are
-    recoreded:
+    recorded:
 
     # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
             if comm=="cyclictest"' >> \
index 921739d00f6902118909acf58a4ccd287964267c..7f01fb1c10842dbd87bf8b1da834272c2ac92a47 100644 (file)
@@ -1891,22 +1891,22 @@ Mandatory barriers should not be used to control SMP effects on either
               /* assign ownership */
               desc->status = DEVICE_OWN;

-               /* sync memory before notifying the device via MMIO */
-               wmb();
-
+               /* notify the device of the updated descriptor */
               writel(DESC_NOTIFY, doorbell);
       }

     dma_rmb() guarantees that the device has released ownership before we
-     read the data from the descriptor, and dma_wmb() guarantees that the
-     data is written to the descriptor before the device can see that it
-     has taken ownership back.  The wmb() is needed to guarantee that the
-     cache coherent memory writes have completed before attempting a write
-     to the cache incoherent MMIO region.
-
-     For details on consistent memory, see the Documentation/DMA-API.txt
-     document.
+     read the data from the descriptor, and dma_wmb() guarantees that the
+     data is written to the descriptor before the device can see that it
+     has taken ownership back.  Note that, because writel() guarantees that
+     cache coherent memory writes have completed before the write to the
+     MMIO region, there is no need to execute a wmb() before writel().  The
+     cheaper writel_relaxed() does not provide this guarantee and must not
+     be used here.
+
+     For details on relaxed I/O accessors such as writel_relaxed(), see the
+     "Kernel I/O barrier effects" section; for details on consistent
+     memory, see the Documentation/DMA-API.txt document.


MMIO WRITE BARRIER
index 635e57493709e16fbecc0235723dc742a608ae0e..b8cb38a98c1989eef926795b79b7399d65700135 100644 (file)
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
 where <config name>.<number> specify the configuration and <function> is
 a symlink to a function being removed from the configuration, e.g.:
 
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
 
 ...
 ...
index 495b7742ab58086b5c81fff88eeb884769391b49..cb8db4f9d09794d6bc5cef908406fcc3ee76e213 100644 (file)
@@ -4391,6 +4391,22 @@ all such vmexits.
 
 Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+        or cmma is enabled
+
+With this capability, KVM support for memory backing with 1M pages
+through hugetlbfs can be enabled for a VM. Once the capability is
+enabled, cmma can no longer be enabled, and pfmfi and storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
+
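+A minimal user-space sketch (error handling elided; vm_fd is assumed to be
+a VM file descriptor obtained via KVM_CREATE_VM):
+
+    struct kvm_enable_cap cap = {
+            .cap = KVM_CAP_S390_HPAGE_1M,
+    };
+
+    if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
+            perror("KVM_ENABLE_CAP");
+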
 8. Other capabilities.
 ----------------------
 
@@ -4610,7 +4626,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
 
index a16aa21138402aa96fd0788a6d6fdab86577964c..f662d3c530e5066f35e1e5744e2b03d0e7feae18 100644 (file)
@@ -29,7 +29,11 @@ mount options are:
L2 and L3 CDP are controlled separately.
 
 RDT features are orthogonal. A particular system may support only
-monitoring, only control, or both monitoring and control.
+monitoring, only control, or both monitoring and control.  Cache
+pseudo-locking is a unique way of using cache control to "pin" or
+"lock" data in the cache. Details can be found in
+"Cache Pseudo-Locking".
+
 
 The mount succeeds if either of allocation or monitoring is present, but
 only those files and directories supported by the system will be created.
@@ -65,6 +69,29 @@ related to allocation:
                        some platforms support devices that have their
                        own settings for cache use which can over-ride
                        these bits.
+"bit_usage":           Annotated capacity bitmasks showing how all
+                       instances of the resource are used. The legend is:
+                       "0" - Corresponding region is unused. When the system's
+                             resources have been allocated and a "0" is found
+                             in "bit_usage" it is a sign that resources are
+                             wasted.
+                       "H" - Corresponding region is used by hardware only
+                             but available for software use. If a resource
+                             has bits set in "shareable_bits" but not all
+                             of these bits appear in the resource groups'
+                             schematas, then the bits appearing in
+                             "shareable_bits" but in no resource group's
+                             schemata will be marked as "H".
+                       "X" - Corresponding region is available for sharing and
+                             used by hardware and software. These are the
+                             bits that appear in "shareable_bits" as
+                             well as a resource group's allocation.
+                       "S" - Corresponding region is used by software
+                             and available for sharing.
+                       "E" - Corresponding region is used exclusively by
+                             one resource group. No sharing allowed.
+                       "P" - Corresponding region is pseudo-locked. No
+                             sharing allowed.
 
 Memory bandwidth (MB) subdirectory contains the following files
 with respect to allocation:
@@ -151,6 +178,9 @@ All groups contain the following files:
        CPUs to/from this group. As with the tasks file a hierarchy is
        maintained where MON groups may only include CPUs owned by the
        parent CTRL_MON group.
+       When the resource group is in pseudo-locked mode, this file will
+       only be readable, reflecting the CPUs associated with the
+       pseudo-locked region.
 
 
 "cpus_list":
@@ -163,6 +193,21 @@ When control is enabled all CTRL_MON groups will also contain:
        A list of all the resources available to this group.
        Each resource has its own line and format - see below for details.
 
+"size":
+       Mirrors the display of the "schemata" file to display the size in
+       bytes of each allocation instead of the bits representing the
+       allocation.
+
+"mode":
+       The "mode" of the resource group dictates the sharing of its
+       allocations. A "shareable" resource group allows sharing of its
+       allocations while an "exclusive" resource group does not. A
+       cache pseudo-locked region is created by first writing
+       "pseudo-locksetup" to the "mode" file before writing the cache
+       pseudo-locked region's schemata to the resource group's "schemata"
+       file. On successful pseudo-locked region creation the mode will
+       file. On successful pseudo-locked region creation, the mode will
+
 When monitoring is enabled all MON groups will also contain:
 
 "mon_data":
@@ -379,6 +424,170 @@ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
 L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
 L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
 
+Cache Pseudo-Locking
+--------------------
+CAT enables a user to specify the amount of cache space that an
+application can fill. Cache pseudo-locking builds on the fact that a
+CPU can still read and write data pre-allocated outside its current
+allocated area on a cache hit. With cache pseudo-locking, data can be
+preloaded into a reserved portion of cache that no application can
+fill, and from that point on will only serve cache hits. The cache
+pseudo-locked memory is made accessible to user space where an
+application can map it into its virtual address space and thus have
+a region of memory with reduced average read latency.
+
+The creation of a cache pseudo-locked region is triggered by a user
+request to do so, accompanied by a schemata of the region to be
+pseudo-locked. The cache pseudo-locked region is created as follows:
+- Create a CAT allocation CLOSNEW with a CBM matching the schemata
+  from the user of the cache region that will contain the pseudo-locked
+  memory. This region must not overlap with any current CAT allocation/CLOS
+  on the system and no future overlap with this cache region is allowed
+  while the pseudo-locked region exists.
+- Create a contiguous region of memory of the same size as the cache
+  region.
+- Flush the cache, disable hardware prefetchers, disable preemption.
+- Make CLOSNEW the active CLOS and touch the allocated memory to load
+  it into the cache.
+- Set the previous CLOS as active.
+- At this point the closid CLOSNEW can be released - the cache
+  pseudo-locked region is protected as long as its CBM does not appear in
+  any CAT allocation. Even though the cache pseudo-locked region will from
+  this point on not appear in any CBM of any CLOS, an application running with
+  any CLOS will be able to access the memory in the pseudo-locked region since
+  the region continues to serve cache hits.
+- The contiguous region of memory loaded into the cache is exposed to
+  user-space as a character device.
+
+Cache pseudo-locking increases the probability that data will remain
+in the cache by carefully configuring the CAT feature and controlling
+application behavior. There is no guarantee that data is placed in
+cache. Instructions like INVD, WBINVD, CLFLUSH, etc. can still evict
+“locked” data from cache. Power management C-states may shrink or
+power off cache. Deeper C-states will automatically be restricted on
+pseudo-locked region creation.
+
+It is required that an application using a pseudo-locked region runs
+with affinity to the cores (or a subset of the cores) associated
+with the cache on which the pseudo-locked region resides. A sanity check
+within the code will not allow an application to map pseudo-locked memory
+unless it runs with affinity to cores associated with the cache on which the
+pseudo-locked region resides. The sanity check is only done during the
+initial mmap() handling; there is no enforcement afterwards, and the
+application itself needs to ensure it remains affine to the correct cores.
+
+Pseudo-locking is accomplished in two stages:
+1) During the first stage the system administrator allocates a portion
+   of cache that should be dedicated to pseudo-locking. At this time an
+   equivalent portion of memory is allocated, loaded into the allocated
+   cache portion, and exposed as a character device.
+2) During the second stage a user-space application maps (mmap()) the
+   pseudo-locked memory into its address space.
+
+Cache Pseudo-Locking Interface
+------------------------------
+A pseudo-locked region is created using the resctrl interface as follows:
+
+1) Create a new resource group by creating a new directory in /sys/fs/resctrl.
+2) Change the new resource group's mode to "pseudo-locksetup" by writing
+   "pseudo-locksetup" to the "mode" file.
+3) Write the schemata of the pseudo-locked region to the "schemata" file. All
+   bits within the schemata should be "unused" according to the "bit_usage"
+   file.
+
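+For example (resource group name, cache id, and bitmask hypothetical):
+
+# mkdir /sys/fs/resctrl/newlock
+# echo pseudo-locksetup > /sys/fs/resctrl/newlock/mode
+# echo 'L2:1=0x3' > /sys/fs/resctrl/newlock/schemata
+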
+On successful pseudo-locked region creation, the "mode" file will contain
+"pseudo-locked" and a new character device with the same name as the resource
+group will exist in /dev/pseudo_lock. This character device can be mmap()'ed
+by user space in order to obtain access to the pseudo-locked memory region.
+
+An example of cache pseudo-locked region creation and usage can be found below.
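+As a minimal sketch (region name and length are hypothetical and must
+match the created region), an application could map the region with:
+
+    int fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+    void *mem = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
+                     MAP_SHARED, fd, 0);
+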
+
+Cache Pseudo-Locking Debugging Interface
+----------------------------------------
+The pseudo-locking debugging interface is enabled by default (if
+CONFIG_DEBUG_FS is enabled) and can be found in /sys/kernel/debug/resctrl.
+
+There is no explicit way for the kernel to test if a provided memory
+location is present in the cache. The pseudo-locking debugging interface uses
+the tracing infrastructure to provide two ways to measure cache residency of
+the pseudo-locked region:
+1) Memory access latency using the pseudo_lock_mem_latency tracepoint. Data
+   from these measurements are best visualized using a hist trigger (see
+   example below). In this test the pseudo-locked region is traversed at
+   a stride of 32 bytes while hardware prefetchers and preemption
+   are disabled. This also provides a substitute visualization of cache
+   hits and misses.
+2) Cache hit and miss measurements using model-specific precision counters if
+   available. Depending on the levels of cache on the system the pseudo_lock_l2
+   and pseudo_lock_l3 tracepoints are available.
+   WARNING: triggering this measurement uses from two (for just L2
+   measurements) to four (for L2 and L3 measurements) precision counters on
+   the system; if any other measurements are in progress the counters and
+   their corresponding event registers will be clobbered.
+
+When a pseudo-locked region is created, a new directory for it is created
+in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
+write-only file, pseudo_lock_measure, is present in this directory. The
+measurement performed on the pseudo-locked region depends on the value, 1 or 2,
+written to this debugfs file. Since the measurements are recorded with the
+tracing infrastructure, the relevant tracepoints need to be enabled before the
+measurement is triggered.
+
+Example of latency debugging interface:
+In this example a pseudo-locked region named "newlock" was created. Here is
+how we can measure the latency in cycles of reading from this region and
+visualize this data with a histogram (available if CONFIG_HIST_TRIGGERS
+is set):
+# :> /sys/kernel/debug/tracing/trace
+# echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
+
+# event histogram
+#
+# trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
+#
+
+{ latency:        456 } hitcount:          1
+{ latency:         50 } hitcount:         83
+{ latency:         36 } hitcount:         96
+{ latency:         44 } hitcount:        174
+{ latency:         48 } hitcount:        195
+{ latency:         46 } hitcount:        262
+{ latency:         42 } hitcount:        693
+{ latency:         40 } hitcount:       3204
+{ latency:         38 } hitcount:       3484
+
+Totals:
+    Hits: 8192
+    Entries: 9
+   Dropped: 0
+
+Example of cache hits/misses debugging:
+In this example a pseudo-locked region named "newlock" was created on the L2
+cache of a platform. Here is how we can obtain details of the cache hits
+and misses using the platform's precision counters.
+
+# :> /sys/kernel/debug/tracing/trace
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# cat /sys/kernel/debug/tracing/trace
+
+# tracer: nop
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+ pseudo_lock_mea-1672  [002] ....  3132.860500: pseudo_lock_l2: hits=4097 miss=0
+
+
 Examples for RDT allocation usage:
 
 Example 1
@@ -502,7 +711,172 @@ siblings and only the real time threads are scheduled on the cores 4-7.
 
 # echo F0 > p0/cpus
 
-4) Locking between applications
+Example 4
+---------
+
+The resource groups in previous examples were all in the default "shareable"
+mode, allowing sharing of their cache allocations. If one resource group
+configures a cache allocation then nothing prevents another resource group
+from overlapping with that allocation.
+
+In this example a new exclusive resource group will be created on an L2 CAT
+system with two L2 cache instances that can be configured with an 8-bit
+capacity bitmask. The new exclusive resource group will be configured to use
+25% of each cache instance.
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+First, we observe that the default group is configured to allocate the
+entire L2 cache:
+
+# cat schemata
+L2:0=ff;1=ff
+
+We could attempt to create the new resource group at this point, but it will
+fail because of the overlap with the schemata of the default group:
+# mkdir p0
+# echo 'L2:0=0x3;1=0x3' > p0/schemata
+# cat p0/mode
+shareable
+# echo exclusive > p0/mode
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+schemata overlaps
+
+To ensure that there is no overlap with another resource group, the default
+resource group's schemata has to change, making it possible for the new
+resource group to become exclusive.
+# echo 'L2:0=0xfc;1=0xfc' > schemata
+# echo exclusive > p0/mode
+# grep . p0/*
+p0/cpus:0
+p0/mode:exclusive
+p0/schemata:L2:0=03;1=03
+p0/size:L2:0=262144;1=262144
+
+A newly created resource group will not overlap with an exclusive resource
+group:
+# mkdir p1
+# grep . p1/*
+p1/cpus:0
+p1/mode:shareable
+p1/schemata:L2:0=fc;1=fc
+p1/size:L2:0=786432;1=786432
+
+The bit_usage file will reflect how the cache is used:
+# cat info/L2/bit_usage
+0=SSSSSSEE;1=SSSSSSEE
+
+A resource group cannot be forced to overlap with an exclusive resource group:
+# echo 'L2:0=0x1;1=0x1' > p1/schemata
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+overlaps with exclusive group
+
+Example of Cache Pseudo-Locking
+-------------------------------
+Lock a portion of the L2 cache from cache id 1 using CBM 0x3. The
+pseudo-locked region is exposed at /dev/pseudo_lock/newlock and can be
+provided to an application as the argument to mmap().
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+Ensure that there are bits available that can be pseudo-locked. Since only
+unused bits can be pseudo-locked, the bits to be pseudo-locked need to be
+removed from the default resource group's schemata:
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSSS
+# echo 'L2:1=0xfc' > schemata
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSS00
+
+Create a new resource group that will be associated with the pseudo-locked
+region, indicate that it will be used for a pseudo-locked region, and
+configure the requested pseudo-locked region capacity bitmask:
+
+# mkdir newlock
+# echo pseudo-locksetup > newlock/mode
+# echo 'L2:1=0x3' > newlock/schemata
+
+On success the resource group's mode will change to pseudo-locked, the
+bit_usage will reflect the pseudo-locked region, and the character device
+exposing the pseudo-locked region will exist:
+
+# cat newlock/mode
+pseudo-locked
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSPP
+# ls -l /dev/pseudo_lock/newlock
+crw------- 1 root root 243, 0 Apr  3 05:01 /dev/pseudo_lock/newlock
+
+/*
+ * Example code to access one page of pseudo-locked cache region
+ * from user space.
+ */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/*
+ * It is required that the application runs with affinity to only
+ * cores associated with the pseudo-locked region. Here the cpu
+ * is hardcoded for convenience of example.
+ */
+static int cpuid = 2;
+
+int main(int argc, char *argv[])
+{
+       cpu_set_t cpuset;
+       long page_size;
+       void *mapping;
+       int dev_fd;
+       int ret;
+
+       page_size = sysconf(_SC_PAGESIZE);
+
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpuid, &cpuset);
+       ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+       if (ret < 0) {
+               perror("sched_setaffinity");
+               exit(EXIT_FAILURE);
+       }
+
+       dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+       if (dev_fd < 0) {
+               perror("open");
+               exit(EXIT_FAILURE);
+       }
+
+       mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                      dev_fd, 0);
+       if (mapping == MAP_FAILED) {
+               perror("mmap");
+               close(dev_fd);
+               exit(EXIT_FAILURE);
+       }
+
+       /* Application interacts with pseudo-locked memory @mapping */
+
+       ret = munmap(mapping, page_size);
+       if (ret < 0) {
+               perror("munmap");
+               close(dev_fd);
+               exit(EXIT_FAILURE);
+       }
+
+       close(dev_fd);
+       exit(EXIT_SUCCESS);
+}
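+
+The example can be compiled and run as follows; the compiler invocation
+and file name are illustrative assumptions, not part of the interface:
+
+# gcc -o pseudo_lock_example pseudo_lock_example.c
+# ./pseudo_lock_example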
+
+Locking between applications
+----------------------------
 
 Certain operations on the resctrl filesystem, composed of read/writes
 to/from multiple files, must be atomic.
@@ -510,7 +884,7 @@ to/from multiple files, must be atomic.
 As an example, the allocation of an exclusive reservation of L3 cache
 involves:
 
-  1. Read the cbmmasks from each directory
+  1. Read the cbmmasks from each directory or the per-resource "bit_usage"
   2. Find a contiguous set of bits in the global CBM bitmask that is clear
      in any of the directory cbmmasks
   3. Create a new directory
index 8d109ef67ab6bef2d0d575502308d10d5079c85c..ad6d2a80cf05af5590f3acfc5f756fcde4a78a9c 100644 (file)
@@ -92,9 +92,7 @@ APICs
 Timing
 
   notsc
-  Don't use the CPU time stamp counter to read the wall time.
-  This can be used to work around timing problems on multiprocessor systems
-  with not properly synchronized CPUs.
+  Deprecated, use tsc=unstable instead.
 
   nohpet
   Don't use the HPET timer.
@@ -156,6 +154,10 @@ NUMA
                If given as an integer, fills all system RAM with N fake nodes
                interleaved over physical nodes.
 
+  numa=fake=<N>U
+               If given as an integer followed by 'U', it will divide each
+               physical node into N emulated nodes.
+
 ACPI
 
   acpi=off     Don't enable ACPI
index 9d5eeff51b5fd32979f64d288375b6489ff25712..0a2342770dee2e8db0a99178f10dfd0c43fd3768 100644 (file)
@@ -581,7 +581,7 @@ W:  https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
 M:     David Airlie <airlied@linux.ie>
-T:     git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T:     git git://anongit.freedesktop.org/drm/drm
 S:     Maintained
 F:     drivers/char/agp/
 F:     include/linux/agp*
@@ -2523,7 +2523,7 @@ S:        Supported
 F:     drivers/scsi/esas2r
 
 ATUSB IEEE 802.15.4 RADIO DRIVER
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ieee802154/atusb.c
@@ -2971,9 +2971,13 @@ N:       bcm585*
 N:     bcm586*
 N:     bcm88312
 N:     hr2
-F:     arch/arm64/boot/dts/broadcom/ns2*
+N:     stingray
+F:     arch/arm64/boot/dts/broadcom/northstar2/*
+F:     arch/arm64/boot/dts/broadcom/stingray/*
 F:     drivers/clk/bcm/clk-ns*
+F:     drivers/clk/bcm/clk-sr*
 F:     drivers/pinctrl/bcm/pinctrl-ns*
+F:     include/dt-bindings/clock/bcm-sr*
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
@@ -4360,12 +4364,7 @@ L:       iommu@lists.linux-foundation.org
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 S:     Supported
-F:     lib/dma-debug.c
-F:     lib/dma-direct.c
-F:     lib/dma-noncoherent.c
-F:     lib/dma-virt.c
-F:     drivers/base/dma-mapping.c
-F:     drivers/base/dma-coherent.c
+F:     kernel/dma/
 F:     include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
@@ -4461,6 +4460,7 @@ F:        Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R:     "Rafael J. Wysocki" <rafael@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:     Supported
 F:     Documentation/kobject.txt
@@ -4631,7 +4631,7 @@ F:        include/uapi/drm/vmwgfx_drm.h
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
 L:     dri-devel@lists.freedesktop.org
-T:     git git://people.freedesktop.org/~airlied/linux
+T:     git git://anongit.freedesktop.org/drm/drm
 B:     https://bugs.freedesktop.org/
 C:     irc://chat.freenode.net/dri-devel
 S:     Maintained
@@ -5444,6 +5444,7 @@ F:        drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
 M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Ofer Levi <oferle@mellanox.com>
 S:     Supported
 F:     arch/arc/plat-eznps
 F:     arch/arc/boot/dts/eznps.dts
@@ -5674,7 +5675,7 @@ F:        drivers/crypto/caam/
 F:     Documentation/devicetree/bindings/crypto/fsl-sec4.txt
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/fsl-diu-fb.*
@@ -5774,7 +5775,7 @@ S:        Maintained
 F:     drivers/net/wan/fsl_ucc_hdlc*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
@@ -5790,7 +5791,6 @@ F:        include/linux/fsl/
 
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M:     Vitaly Bordug <vbordug@ru.mvista.com>
 L:     linuxppc-dev@lists.ozlabs.org
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -5798,7 +5798,7 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 M:     Nicolin Chen <nicoleotsuka@gmail.com>
 M:     Xiubo Li <Xiubo.Lee@gmail.com>
 R:     Fabio Estevam <fabio.estevam@nxp.com>
@@ -5930,7 +5930,7 @@ F:        Documentation/dev-tools/gcov.rst
 
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:     Jan Kiszka <jan.kiszka@siemens.com>
-M:     Kieran Bingham <kieran@bingham.xyz>
+M:     Kieran Bingham <kbingham@kernel.org>
 S:     Supported
 F:     scripts/gdb/
 
@@ -6909,7 +6909,7 @@ F:        drivers/clk/clk-versaclock5.c
 
 IEEE 802.15.4 SUBSYSTEM
 M:     Alexander Aring <alex.aring@gmail.com>
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 W:     http://wpan.cakelab.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -7096,6 +7096,7 @@ F:        include/uapi/linux/input.h
 F:     include/uapi/linux/input-event-codes.h
 F:     include/linux/input/
 F:     Documentation/devicetree/bindings/input/
+F:     Documentation/devicetree/bindings/serio/
 F:     Documentation/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
@@ -7985,7 +7986,7 @@ F:        lib/test_kmod.c
 F:     tools/testing/selftests/kmod/
 
 KPROBES
-M:     Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+M:     Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 M:     Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Masami Hiramatsu <mhiramat@kernel.org>
@@ -8316,10 +8317,16 @@ M:      Jade Alglave <j.alglave@ucl.ac.uk>
 M:     Luc Maranget <luc.maranget@inria.fr>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 R:     Akira Yokosawa <akiyks@gmail.com>
+R:     Daniel Lustig <dlustig@nvidia.com>
 L:     linux-kernel@vger.kernel.org
+L:     linux-arch@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     tools/memory-model/
+F:     Documentation/atomic_bitops.txt
+F:     Documentation/atomic_t.txt
+F:     Documentation/core-api/atomic_ops.rst
+F:     Documentation/core-api/refcount-vs-atomic.rst
 F:     Documentation/memory-barriers.txt
 
 LINUX SECURITY MODULE (LSM) FRAMEWORK
@@ -8629,7 +8636,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <amitkarwar@gmail.com>
 M:     Nishant Sarmukadam <nishants@marvell.com>
 M:     Ganapathi Bhat <gbhat@marvell.com>
-M:     Xinming Hu <huxm@marvell.com>
+M:     Xinming Hu <huxinming820@gmail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/marvell/mwifiex/
@@ -9075,7 +9082,7 @@ S:        Maintained
 F:     drivers/usb/mtu3/
 
 MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
-M:     Peter Senna Tschudin <peter.senna@collabora.com>
+M:     Peter Senna Tschudin <peter.senna@gmail.com>
 M:     Martin Donnelly <martin.donnelly@ge.com>
 M:     Martyn Welch <martyn.welch@collabora.co.uk>
 S:     Maintained
@@ -9756,6 +9763,11 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/NCR_D700.*
 
+NCSI LIBRARY:
+M:     Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S:     Maintained
+F:     net/ncsi/
+
 NCT6775 HARDWARE MONITOR DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -9882,6 +9894,7 @@ M:        Andrew Lunn <andrew@lunn.ch>
 M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/dsa/
 F:     net/dsa/
 F:     include/net/dsa.h
 F:     include/linux/dsa/
@@ -10208,11 +10221,13 @@ F:    sound/soc/codecs/sgtl5000*
 
 NXP TDA998X DRM DRIVER
 M:     Russell King <linux@armlinux.org.uk>
-S:     Supported
+S:     Maintained
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
+F:     include/dt-bindings/display/tda998x.h
+K:     "nxp,tda998x"
 
 NXP TFA9879 DRIVER
 M:     Peter Rosin <peda@axentia.se>
@@ -11476,6 +11491,15 @@ W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/intersil/prism54/
 
+PROC FILESYSTEM
+R:     Alexey Dobriyan <adobriyan@gmail.com>
+L:     linux-kernel@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+S:     Maintained
+F:     fs/proc/
+F:     include/linux/proc_fs.h
+F:     tools/testing/selftests/proc/
+
 PROC SYSCTL
 M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
 M:     Kees Cook <keescook@chromium.org>
@@ -11808,9 +11832,9 @@ F:  Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:  drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
-M:     Timur Tabi <timur@codeaurora.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM HEXAGON ARCHITECTURE
@@ -11821,7 +11845,7 @@ S:      Supported
 F:     arch/hexagon/
 
 QUALCOMM HIDMA DRIVER
-M:     Sinan Kaya <okaya@codeaurora.org>
+M:     Sinan Kaya <okaya@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-arm-msm@vger.kernel.org
 L:     dmaengine@vger.kernel.org
@@ -12021,9 +12045,9 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/
 X:     Documentation/RCU/torture.txt
 F:     include/linux/rcu*
-X:     include/linux/srcu.h
+X:     include/linux/srcu*.h
 F:     kernel/rcu/
-X:     kernel/torture.c
+X:     kernel/rcu/srcu*.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -12384,7 +12408,6 @@ F:      drivers/pci/hotplug/s390_pci_hpc.c
 
 S390 VFIO-CCW DRIVER
 M:     Cornelia Huck <cohuck@redhat.com>
-M:     Dong Jia Shi <bjsdjshi@linux.ibm.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
@@ -13060,8 +13083,8 @@ L:      linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F:     include/linux/srcu.h
-F:     kernel/rcu/srcu.c
+F:     include/linux/srcu*.h
+F:     kernel/rcu/srcu*.c
 
 SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
 M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -13648,7 +13671,7 @@ M:      Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
-F:     lib/swiotlb.c
+F:     kernel/dma/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
@@ -14420,6 +14443,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/torture.c
 F:     kernel/rcu/rcutorture.c
+F:     kernel/rcu/rcuperf.c
 F:     kernel/locking/locktorture.c
 
 TOSHIBA ACPI EXTRAS DRIVER
@@ -15572,9 +15596,17 @@ M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
+F:     Documentation/devicetree/bindings/x86/
 F:     Documentation/x86/
 F:     arch/x86/
 
+X86 ENTRY CODE
+M:     Andy Lutomirski <luto@kernel.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S:     Maintained
+F:     arch/x86/entry/
+
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
 M:     Borislav Petkov <bp@alien8.de>
@@ -15597,7 +15629,7 @@ F:      drivers/platform/x86/
 F:     drivers/platform/olpc/
 
 X86 VDSO
-M:     Andy Lutomirski <luto@amacapital.net>
+M:     Andy Lutomirski <luto@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
index ca2af1ab91ebadf6ac5c62150b4e72f2a1f1441d..863f58503beed45ca5ec40606a6b313fe4ae6b14 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
          else if [ -x /bin/bash ]; then echo /bin/bash; \
          else echo sh; fi ; fi)
 
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 HOSTCC       = gcc
 HOSTCXX      = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif     # skip-makefile
 PHONY += FORCE
 FORCE:
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
+# Declare the contents of the PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
index 0c4805a572c8739ff9d657c63961747e3ea08ff3..04a4a138ed131c7256aeb4108453400516b8965a 100644 (file)
@@ -555,11 +555,6 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config HAVE_DEC_LOCK
-       bool
-       depends on SMP
-       default y
-
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
index 767bfdd42992de7fb4084dfecb4caa6c13655129..150a1c5d6a2c9b145f51031a300223b0cacd4236 100644 (file)
  * To ensure dependency ordering is preserved for the _relaxed and
  * _release atomics, an smp_read_barrier_depends() is unconditionally
  * inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
  */
-#define __atomic_op_acquire(op, args...)       op##_relaxed(args)
-#define __atomic_op_fence                      __atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
 
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int c, new, old;
        smp_mb();
@@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        smp_mb();
        return old;
 }
-
+#define atomic_fetch_add_unless atomic_fetch_add_unless
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
-       long c, tmp;
+       long c, new, old;
        smp_mb();
        __asm__ __volatile__(
-       "1:     ldq_l   %[tmp],%[mem]\n"
-       "       cmpeq   %[tmp],%[u],%[c]\n"
-       "       addq    %[tmp],%[a],%[tmp]\n"
+       "1:     ldq_l   %[old],%[mem]\n"
+       "       cmpeq   %[old],%[u],%[c]\n"
+       "       addq    %[old],%[a],%[new]\n"
        "       bne     %[c],2f\n"
-       "       stq_c   %[tmp],%[mem]\n"
-       "       beq     %[tmp],3f\n"
+       "       stq_c   %[new],%[mem]\n"
+       "       beq     %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
-       : [tmp] "=&r"(tmp), [c] "=&r"(c)
+       : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
-       return !c;
+       return old;
 }
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 /*
  * atomic64_dec_if_positive - decrement by 1 if old value positive
@@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        smp_mb();
        return old - 1;
 }
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 #endif /* _ALPHA_ATOMIC_H */
index 6e921754c8fc747be6d6b6b3c28d57d48bddce8d..c210a25dd6daad4a99f40ce729cde4db707de6cf 100644 (file)
@@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
                struct rusage32 __user *, ur)
 {
-       unsigned int status = 0;
        struct rusage r;
-       long err = kernel_wait4(pid, &status, options, &r);
+       long err = kernel_wait4(pid, ustatus, options, &r);
        if (err <= 0)
                return err;
-       if (put_user(status, ustatus))
-               return -EFAULT;
        if (!ur)
                return err;
        if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
index 04f9729de57c351c7e142b9aab9d9bca0878a2f3..854d5e79979e4ce929d7998bf1135a12880238f8 100644 (file)
@@ -35,8 +35,6 @@ lib-y =       __divqu.o __remqu.o __divlu.o __remlu.o \
        callback_srm.o srm_puts.o srm_printk.o \
        fls.o
 
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o =       -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644 (file)
index a117707..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- * 
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
-  asm (".text                                  \n\
-       .global _atomic_dec_and_lock            \n\
-       .ent _atomic_dec_and_lock               \n\
-       .align  4                               \n\
-_atomic_dec_and_lock:                          \n\
-       .prologue 0                             \n\
-1:     ldl_l   $1, 0($16)                      \n\
-       subl    $1, 1, $1                       \n\
-       beq     $1, 2f                          \n\
-       stl_c   $1, 0($16)                      \n\
-       beq     $1, 4f                          \n\
-       mb                                      \n\
-       clr     $0                              \n\
-       ret                                     \n\
-2:     br      $29, 3f                         \n\
-3:     ldgp    $29, 0($29)                     \n\
-       br      $atomic_dec_and_lock_1..ng      \n\
-       .subsection 2                           \n\
-4:     br      1b                              \n\
-       .previous                               \n\
-       .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
-       /* Slow path */
-       spin_lock(lock);
-       if (atomic_dec_and_test(atomic))
-               return 1;
-       spin_unlock(lock);
-       return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
index e81bcd271be72e7b1e2bbece5b2b7442ebd7b462..5151d81476a1b709c2c3112765fb3f134ca36083 100644 (file)
@@ -50,6 +50,9 @@ config ARC
        select HAVE_KERNEL_LZMA
        select ARCH_HAS_PTE_SPECIAL
 
+config ARCH_HAS_CACHE_LINE_SIZE
+       def_bool y
+
 config MIGHT_HAVE_PCI
        bool
 
@@ -413,7 +416,7 @@ config ARC_HAS_DIV_REM
 
 config ARC_HAS_ACCL_REGS
        bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
-       default n
+       default y
        help
          Depending on the configuration, CPU can contain accumulator reg-pair
          (also referred to as r58:r59). These can also be used by gcc as GPR so
index d37f49d6a27f40f65d3e34bd3e2df5343a97d1e4..6c1b20dd76ad902655d7317eb44580923d98c690 100644 (file)
@@ -16,7 +16,7 @@ endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y       += -fno-common -pipe -fno-builtin -D__linux__
+cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
 cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
 
@@ -140,16 +140,3 @@ dtbs: scripts
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
index 09f85154c5a4bf6609f1dabb4595ac00291d0668..a635ea972304e3531b205c23a0e3ef814608e313 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 09fed3ef22b6a0c4ea3bcc508b3817c8814b675b..aa507e423075b16be125d95fbb29b55b5b08683c 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index ea2f6d817d1ae0c241bb63e9b264c09004eeb215..eba07f4686545ed00383756ae53ba404b2b2b25e 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index ab231c040efe55db40d6811c723f0f44fd601e05..098b19fbaa51f0116e7f0328eb3a17feb72f0123 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
index cf449cbf440dfe32c4169b7e64ec9149dd14b692..0104c404d8970ee44ecb0ced17fe137363e4cf5b 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 1b54c72f4296fc2a03bd1c70f1550dedbcbc23e9..6491be0ddbc9e9cfd457dccc452d5bebf28c1183 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 31c2c70b34a172cf89ba35759593f12777574052..99e05cf63fca2c6d953b952386b0cf1649ae7332 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index a578c721d50fb62829a02aeaa3b57e9524332734..0dc4f9b737e7a4f48b41ae7caaedaa2ce89c5b40 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 37d7395f3272af75a5b5bbcd4c5d4d0dc8d0e6d8..be3c30a15e54c09db51112ca88fd9b32d73a0d34 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 1e1470e2a7f00f558c160fdda5abce07c9441fd7..3a74b9b217723d2c2c75a91510ef1aadeab7b89a 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 084a6e42685bfd9aa16c398bd8279ed198ca613e..ea2834b4dc1dad187193549b7b146da413726c37 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index f36d479904152da8ce167f2874a0b10b68a3975b..80a5a1b4924bcf086ed57c34d7778304288f35a2 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
index 1aca2e8fd1ba2fb08b647142ee7eb28a4dc84dfa..2cc87f909747c1818385de9ba99c0bbeda6197b8 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
index 11859287c52af55df317e7a6d3ab9403706bd172..4e0072730241220c84ddc5019bba91c856de6f34 100644 (file)
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                     \
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_andnot atomic_andnot
+#define atomic_andnot          atomic_andnot
+#define atomic_fetch_andnot    atomic_fetch_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
        ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
 ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
 ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u)                                   \
-({                                                                     \
-       int c, old;                                                     \
-                                                                       \
-       /*                                                              \
-        * Explicit full memory barrier needed before/after as          \
-        * LLOCK/SCOND thmeselves don't provide any such semantics      \
-        */                                                             \
-       smp_mb();                                                       \
-                                                                       \
-       c = atomic_read(v);                                             \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
-               c = old;                                                \
-                                                                       \
-       smp_mb();                                                       \
-                                                                       \
-       c;                                                              \
-})
-
-#define atomic_inc_not_zero(v)         atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v)                  atomic_add(1, v)
-#define atomic_dec(v)                  atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)         (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)         (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v)      (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v)      (atomic_add_return(i, v) < 0)
-
-
 #ifdef CONFIG_GENERIC_ATOMIC64
 
 #include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)     \
        ATOMIC64_OP_RETURN(op, op1, op2)                                \
        ATOMIC64_FETCH_OP(op, op1, op2)
 
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot                atomic64_andnot
+#define atomic64_fetch_andnot  atomic64_fetch_andnot
 
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 
        return val;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v
  */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+                                                 long long u)
 {
-       long long val;
-       int op_done;
+       long long old, temp;
 
        smp_mb();
 
        __asm__ __volatile__(
        "1:     llockd  %0, [%2]        \n"
-       "       mov     %1, 1           \n"
        "       brne    %L0, %L4, 2f    # continue to add since v != u \n"
        "       breq.d  %H0, %H4, 3f    # return since v == u \n"
-       "       mov     %1, 0           \n"
        "2:                             \n"
-       "       add.f   %L0, %L0, %L3   \n"
-       "       adc     %H0, %H0, %H3   \n"
-       "       scondd  %0, [%2]        \n"
+       "       add.f   %L1, %L0, %L3   \n"
+       "       adc     %H1, %H0, %H3   \n"
+       "       scondd  %1, [%2]        \n"
        "       bnz     1b              \n"
        "3:                             \n"
-       : "=&r"(val), "=&r" (op_done)
+       : "=&r"(old), "=&r" (temp)
        : "r"(&v->counter), "r"(a), "r"(u)
        : "cc");        /* memory clobber comes from smp_mb() */
 
        smp_mb();
 
-       return op_done;
+       return old;
 }
-
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 
index 8486f328cc5d2aea087812ab19be949001e64b73..ff7d3232764a29a41503a213d3bd385e232acf42 100644 (file)
@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN      128
+#define SMP_CACHE_BYTES                128
+#define cache_line_size()      SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN      SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
index d5da2115d78a678e343da2abec51f6c8efbbe0a4..03d6bb0f4e13a2dd49708f12dfb6f2f4788bf8ae 100644 (file)
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>         /* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
        __asm__ __volatile__(
index ec36d5b6d435bfca7a05148e05cd1f801c5cdecf..29f3988c94249408f7b3828957c26f6849e025c5 100644 (file)
        POP     gp
        RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ld      r25, [sp, 12]
+#endif
        ld  sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
 .endm
        POP     gp
        RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ld      r25, [sp, 12]
+#endif
        ld  sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
 .endm
index 51597f344a62aced8c98cb2035c9ec40eff55e6e..302b0db8ea2bd9afc0d55116e24eb1dcee92eea4 100644 (file)
@@ -86,9 +86,6 @@
        POP     r1
        POP     r0
 
-#ifdef CONFIG_ARC_CURR_IN_REG
-       ld      r25, [sp, 12]
-#endif
 .endm
 
 /*--------------------------------------------------------------
index 2e52d18e6bc7ee3661d055c2ae6d98806478bb50..2c1b479d5aea9d34a3c666118ab0041d5f9ea334 100644 (file)
@@ -45,8 +45,6 @@ struct prev_kprobe {
 
 struct kprobe_ctlblk {
        unsigned int kprobe_status;
-       struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index c28e6c347b4900217ad48053c69679bb3da8b607..871f3cb16af9f2ec58c76192ffc098d914588b9d 100644 (file)
@@ -34,9 +34,7 @@ struct machine_desc {
        const char              *name;
        const char              **dt_compat;
        void                    (*init_early)(void);
-#ifdef CONFIG_SMP
        void                    (*init_per_cpu)(unsigned int);
-#endif
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
 
index 109baa06831cecc38cf1d9f11ba447a31c0c4b14..09ddddf71cc5049a570d11a5114ccec945df56ad 100644 (file)
@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define WANT_PAGE_VIRTUAL   1
 
index 8ec5599a0957e3f2314a63450f46588125d94a82..cf4be70d589259df60bfa2198da9f8c7c0a543c7 100644 (file)
@@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)      (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)    ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)    ((pte_lookalike).val >> 13)
 
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
index 538b36afe89e7c9871e2c37d2322d839a9b27a26..62b185057c040157132386aaee6ff56eaaebcf25 100644 (file)
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
        /* a SMP H/w block could do IPI IRQ request here */
        if (plat_smp_ops.init_per_cpu)
                plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
 
        if (machine_desc->init_per_cpu)
                machine_desc->init_per_cpu(smp_processor_id());
-#endif
 }
 
 /*
index 42b05046fad9f13b3cffecc1cb041e8febed3840..df35d4c0b0b84f9490d70010384bde7f9ec164c8 100644 (file)
@@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 
                /* If we have no pre-handler or it returned 0, we continue with
                 * normal processing. If we have a pre-handler and it returned
-                * non-zero - which is expected from setjmp_pre_handler for
-                * jprobe, we return without single stepping and leave that to
-                * the break-handler which is invoked by a kprobe from
-                * jprobe_return
+                * non-zero - which means user handler setup registers to exit
+                * to another instruction, we must skip the single stepping.
                 */
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
+               } else {
+                       reset_current_kprobe();
+                       preempt_enable_no_resched();
                }
 
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       setup_singlestep(p, regs);
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       return 1;
-               }
        }
 
        /* no_kprobe: */
@@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long sp_addr = regs->sp;
-
-       kcb->jprobe_saved_regs = *regs;
-       memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
-       regs->ret = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       __asm__ __volatile__("unimp_s");
-       return;
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long sp_addr;
-
-       *regs = kcb->jprobe_saved_regs;
-       sp_addr = regs->sp;
-       memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
-       preempt_enable_no_resched();
-
-       return 1;
-}
-
 static void __used kretprobe_trampoline_holder(void)
 {
        __asm__ __volatile__(".global kretprobe_trampoline\n"
@@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->ret = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 5ac3b547453fd5b4b5393fdc10dfff34a047941f..4674541eba3fd019a51aeb02db27b2bc04569412 100644 (file)
@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
        struct pt_regs *regs = current_pt_regs();
-       int uval = -EFAULT;
+       u32 uval;
+       int ret;
 
        /*
         * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
        /* Z indicates to userspace if operation succeded */
        regs->status32 &= ~STATUS_Z_MASK;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-               return -EFAULT;
+       ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+       if (!ret)
+                goto fail;
 
+again:
        preempt_disable();
 
-       if (__get_user(uval, uaddr))
-               goto done;
+       ret = __get_user(uval, uaddr);
+       if (ret)
+                goto fault;
 
-       if (uval == expected) {
-               if (!__put_user(new, uaddr))
-                       regs->status32 |= STATUS_Z_MASK;
-       }
+       if (uval != expected)
+                goto out;
 
-done:
-       preempt_enable();
+       ret = __put_user(new, uaddr);
+       if (ret)
+                goto fault;
+
+       regs->status32 |= STATUS_Z_MASK;
 
+out:
+       preempt_enable();
        return uval;
+
+fault:
+       preempt_enable();
+
+       if (unlikely(ret != -EFAULT))
+                goto fail;
+
+       down_read(&current->mm->mmap_sem);
+       ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+                              FAULT_FLAG_WRITE, NULL);
+       up_read(&current->mm->mmap_sem);
+
+       if (likely(!ret))
+                goto again;
+
+fail:
+       force_sig(SIGSEGV, current);
+       return ret;
 }
 
 #ifdef CONFIG_ISA_ARCV2
index 9dbe645ee127ea10f831654f6a558a9645c71632..25c631942500ffe2802654f6690d9a223e2fbfaf 100644 (file)
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
                      unsigned long pfn)
 {
-       unsigned int paddr = pfn << PAGE_SHIFT;
+       phys_addr_t paddr = pfn << PAGE_SHIFT;
 
        u_vaddr &= PAGE_MASK;
 
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                     unsigned long u_vaddr)
 {
        /* TBD: do we really need to clear the kernel mapping */
-       __flush_dcache_page(page_address(page), u_vaddr);
-       __flush_dcache_page(page_address(page), page_address(page));
+       __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+       __flush_dcache_page((phys_addr_t)page_address(page),
+                           (phys_addr_t)page_address(page));
 
 }
 
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
                }
        }
 
+       /*
+        * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+        * or equal to any cache line length.
+        */
+       BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+                        "SMP_CACHE_BYTES must be >= any cache line length");
+       if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+               panic("L2 Cache line [%d] > kernel Config [%d]\n",
+                     l2_line_sz, SMP_CACHE_BYTES);
+
        /* Note that SLC disable not formally supported till HS 3.0 */
        if (is_isa_arcv2() && l2_line_sz && !slc_enable)
                arc_slc_disable();
index 8c10718409795a23261da9b4f479fd435e52db0a..ec47e6079f5d08371a65ea21277b2985bec989d5 100644 (file)
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *          |   map          ==  for_device     |   unmap     ==  for_cpu
+ *          |----------------------------------------------------------------
+ * TO_DEV   |   writeback        writeback      |   none          none
+ * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
+ * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
+ *
+ *     [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_wback(paddr, size);
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               dma_cache_wback(paddr, size);
+               break;
+
+       case DMA_FROM_DEVICE:
+               dma_cache_inv(paddr, size);
+               break;
+
+       case DMA_BIDIRECTIONAL:
+               dma_cache_wback_inv(paddr, size);
+               break;
+
+       default:
+               break;
+       }
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_inv(paddr, size);
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               break;
+
+       /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+       case DMA_FROM_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               dma_cache_inv(paddr, size);
+               break;
+
+       default:
+               break;
+       }
 }
index 0c7d11022d0f8875256e64162d2ee0f1f0aa85e1..4f6a1673b3a6eaacc80473108dec4cc8c1e4236c 100644 (file)
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
 };
 
 /* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+       union {
+               struct {
+                       u32 ien:1, men:1, hen:1, reserved:29;
+               };
+               u32 value;
+       };
+};
+
 struct nps_host_reg_aux_udmc {
        union {
                struct {
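
The nps_host_reg_aux_dpc union added above follows the usual ARC aux-register
idiom: individual fields are edited through the anonymous bitfield struct while
the whole 32-bit word moves through .value. A minimal sketch of that pattern -
the CTOP_AUX_DPC register name is an assumption for illustration only:

        struct nps_host_reg_aux_dpc dpc;

        dpc.value = read_aux_reg(CTOP_AUX_DPC); /* assumed register id */
        dpc.ien = 1;                            /* touch one field only */
        write_aux_reg(CTOP_AUX_DPC, dpc.value); /* write back whole word */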
index 2388de3d09ef9e71f710ec4625e20a9e85cbdc1e..ed0077ef666eb7bdb8930bb2b52eddbe46f0b946 100644 (file)
@@ -15,6 +15,8 @@
  */
 
 #include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
 /* Verify and set the value of the mtm hs counter */
 static int __init set_mtm_hs_ctr(char *ctr_str)
 {
-       long hs_ctr;
+       int hs_ctr;
        int ret;
 
-       ret = kstrtol(ctr_str, 0, &hs_ctr);
+       ret = kstrtoint(ctr_str, 0, &hs_ctr);
 
        if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
                pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
index 19ab3cf98f0f34904b8431a6d4cf36642066c513..9356753c2ed83fc8f9ee7dbf55f173401499e2e4 100644 (file)
@@ -7,5 +7,8 @@
 
 menuconfig ARC_SOC_HSDK
        bool "ARC HS Development Kit SOC"
+       depends on ISA_ARCV2
+       select ARC_HAS_ACCL_REGS
        select CLK_HSDK
        select RESET_HSDK
+       select MIGHT_HAVE_PCI
index 2958aedb649ab183edcce1ca858006f67fd8ff21..2588b842407c281df0051b814fefd3cfcd9c31fe 100644 (file)
@@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define SDIO_UHS_REG_EXT       (SDIO_BASE + 0x108)
 #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
 
+#define HSDK_GPIO_INTC          (ARC_PERIPHERAL_BASE + 0x3000)
+
+static void __init hsdk_enable_gpio_intc_wire(void)
+{
+       /*
+        * Peripherals on CPU Card are wired to cpu intc via intermediate
+        * DW APB GPIO blocks (mainly for debouncing)
+        *
+        *         ---------------------
+        *        |  snps,archs-intc  |
+        *        ---------------------
+        *                  |
+        *        ----------------------
+        *        | snps,archs-idu-intc |
+        *        ----------------------
+        *         |   |     |   |    |
+        *         | [eth] [USB]    [... other peripherals]
+        *         |
+        * -------------------
+        * | snps,dw-apb-intc |
+        * -------------------
+        *  |      |   |   |
+        * [Bt] [HAPS]   [... other peripherals]
+        *
+        * The current implementation of the "irq-dw-apb-ictl" driver doesn't
+        * work well with stacked INTCs. In particular, problems arise if its
+        * master INTC is not yet instantiated. See discussion here -
+        * https://lkml.org/lkml/2015/3/4/755
+        *
+        * So set up the first GPIO block as a passive pass-through and hide it
+        * from the DT hardware topology - connect the intc directly to the cpu
+        * intc. The GPIO "wire" nevertheless needs to be initialized (here).
+        *
+        * One side advantage is that peripheral interrupt handling avoids one
+        * nested intc ISR hop.
+        *
+        * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping"
+        * we have the following GPIO input lines used as sources of interrupt:
+        * - GPIO[0] - Bluetooth interrupt of RS9113 module
+        * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
+        * - GPIO[3] - Audio codec (MAX9880A) interrupt
+        * - GPIO[8-23] - Available on Arduino and PMOD_x headers
+        * For now the Arduino and PMOD_x headers are unused in the Linux
+        * use-case, so we only enable lines 0, 2 and 3.
+        *
+        * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
+        */
+#define GPIO_INTEN              (HSDK_GPIO_INTC + 0x30)
+#define GPIO_INTMASK            (HSDK_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL      (HSDK_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY       (HSDK_GPIO_INTC + 0x3c)
+#define GPIO_INT_CONNECTED_MASK        0x0d
+
+       iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
+       iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
+       iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
+       iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
+       iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
+}
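
As a quick cross-check of the mask chosen above, wiring lines 0, 2 and 3 gives:

        GPIO_INT_CONNECTED_MASK == BIT(0) | BIT(2) | BIT(3)
                                == 0x01   | 0x04   | 0x08   == 0x0d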
+
 static void __init hsdk_init_early(void)
 {
        /*
@@ -62,6 +122,8 @@ static void __init hsdk_init_early(void)
         * minimum possible div-by-2.
         */
        iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+       hsdk_enable_gpio_intc_wire();
 }
 
 static const char *hsdk_compat[] __initconst = {
index 54eeb8d00bc62a9f818aa9a833cbc15e7a1d9324..0f328d639d51d5f34156d112c8cfa521b9e24404 100644 (file)
@@ -9,6 +9,7 @@ config ARM
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_KCOV
+       select ARCH_HAS_MEMBARRIER_SYNC_CORE
        select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
        select ARCH_HAS_PHYS_TO_DMA
        select ARCH_HAS_SET_MEMORY
@@ -337,8 +338,8 @@ config ARCH_MULTIPLATFORM
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select PCI_DOMAINS if PCI
        select SPARSE_IRQ
        select USE_OF
@@ -465,9 +466,9 @@ config ARCH_DOVE
        bool "Marvell Dove"
        select CPU_PJ4
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select MVEBU_MBUS
        select PINCTRL
        select PINCTRL_DOVE
@@ -512,8 +513,8 @@ config ARCH_LPC32XX
        select COMMON_CLK
        select CPU_ARM926T
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select MULTI_IRQ_HANDLER
        select SPARSE_IRQ
        select USE_OF
        help
@@ -532,11 +533,11 @@ config ARCH_PXA
        select TIMER_OF
        select CPU_XSCALE if !CPU_XSC3
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIO_PXA
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select PLAT_PXA
        select SPARSE_IRQ
        help
@@ -572,11 +573,11 @@ config ARCH_SA1100
        select CPU_FREQ
        select CPU_SA1100
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
        select ISA
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@ -590,10 +591,10 @@ config ARCH_S3C24XX
        select GENERIC_CLOCKEVENTS
        select GPIO_SAMSUNG
        select GPIOLIB
+       select GENERIC_IRQ_MULTI_HANDLER
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
        select HAVE_S3C_RTC if RTC_CLASS
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H
        select SAMSUNG_ATAGS
        select USE_OF
@@ -627,10 +628,10 @@ config ARCH_OMAP1
        select CLKSRC_MMIO
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_CHIP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H if PCCARD
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
@@ -921,11 +922,6 @@ config IWMMXT
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
 
-config MULTI_IRQ_HANDLER
-       bool
-       help
-         Allow each machine to specify it's own IRQ handler at run time.
-
 if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
@@ -1245,8 +1241,14 @@ config PCI
          VESA. If you have PCI, say Y, otherwise N.
 
 config PCI_DOMAINS
-       bool
+       bool "Support for multiple PCI domains"
        depends on PCI
+       help
+         Enable kernel management of PCI domains. Say Y if your machine
+         has a PCI bus hierarchy that requires more than one PCI
+         domain (aka segment) to be correctly managed. Say N otherwise.
+
+         If you don't know what to do here, say N.
 
 config PCI_DOMAINS_GENERIC
        def_bool PCI_DOMAINS
index fc26c3d7b9b66a1ebc52a6761441f7a1da9fb422..62ebeae9f837d8b302c72f79ccb0957fc184e89c 100644 (file)
@@ -46,12 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__ARMEB__
 AS             += -EB
-LD             += -EB
+LDFLAGS                += -EB
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__ARMEL__
 AS             += -EL
-LD             += -EL
+LDFLAGS                += -EL
 endif
 
 #
index f9e8667f5886db82027643130ca87b58cbea8f62..73b514dddf65b281b0c3093f40b05496240b5455 100644 (file)
                        AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_dat3.mmc0_dat3 */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_cmd.mmc0_cmd */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_clk.mmc0_clk */
-                       AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4)              /* mcasp0_aclkr.mmc0_sdwp */
                >;
        };
 
index ca294914bbb131b9725c43b8e7c768466bf0c775..23ea381d363fd12e6d9ac7f08e8613f1bf12443e 100644 (file)
@@ -39,6 +39,8 @@
                        ti,davinci-ctrl-ram-size = <0x2000>;
                        ti,davinci-rmii-en = /bits/ 8 <1>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
+                       clocks = <&emac_ick>;
+                       clock-names = "ick";
                };
 
                davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
                        bus_freq = <1000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
+                       clocks = <&emac_fck>;
+                       clock-names = "fck";
                };
 
                uart4: serial@4809e000 {
        };
 };
 
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+       status = "disabled";
+};
+
 &iva {
        status = "disabled";
 };
index 440351ad0b80686d06126df39b02d8de104a95d0..d4be3fd0b6f4094643ef98660e0f2dbcb5edca9d 100644 (file)
 
                touchscreen-size-x = <480>;
                touchscreen-size-y = <272>;
+
+               wakeup-source;
        };
 
        tlv320aic3106: tlv320aic3106@1b {
index 6782ce481ac967ded05bbfc124355aa894790d6f..d8769956cbfcff7b4a38e72959ba8b88f397a1c0 100644 (file)
                                              3700 5
                                              3900 6
                                              4000 7>;
-                       cooling-cells = <2>;
+                       #cooling-cells = <2>;
                };
 
                gpio-leds {
index 18edc9bc79273b794ce263ce7d9674f43732c58e..929459c42760592c00d4ea0d585be4da2ad7e301 100644 (file)
 
                        thermal: thermal@e8078 {
                                compatible = "marvell,armada380-thermal";
-                               reg = <0xe4078 0x4>, <0xe4074 0x4>;
+                               reg = <0xe4078 0x4>, <0xe4070 0x8>;
                                status = "okay";
                        };
 
index 9fe4f5a6379e3b60d79a6ed8a0327f680434861e..2c4df2d2d4a6e1165fe27565a19d681c47a32cfa 100644 (file)
                        reg = <0x18008000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x1800b000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <0>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 97 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 98 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 99 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <1>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 103 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 104 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 105 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
index 3f9cedd8011f0c22fb05b6a50d1705fc5ceab05d..3084a7c957339f0edc2fef97d203b08635c96790 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
 
                        reg = <0x3b000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 183 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 184 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 185 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 189 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 190 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 191 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index dcc55aa84583cdd18f7ef6ecd780eb947be1ef1f..09ba8504632284532e3b17c6d1531e2d732fadc4 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        dma-coherent;
                        status = "disabled";
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 128 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 129 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 130 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 134 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 135 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 136 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <2>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 140 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 141 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 142 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index 9a076c409f4ed35fcf5fbe79807ede6e7e8466d5..ef995e50ee12bfd8b3d90d9e07062a41e04f4ff3 100644 (file)
        i2c0: i2c@18009000 {
                compatible = "brcm,iproc-i2c";
                reg = <0x18009000 0x50>;
-               interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clock-frequency = <100000>;
index f6f1597b03df931a1dea057921a43cea9f929a31..0f4f817a9e229c58f973f935a6f5906b1e0f8979 100644 (file)
                        gpio-controller;
                        #gpio-cells = <2>;
                        reg = <0x226000 0x1000>;
-                       interrupts = <42 IRQ_TYPE_EDGE_BOTH
-                               43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-                               45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-                               47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-                               49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+                       interrupts = <42 43 44 45 46 47 48 49 50>;
                        ti,ngpio = <144>;
                        ti,davinci-gpio-unbanked = <0>;
                        status = "disabled";
index 9dcd14edc20287f80c73a3b95d21e303ffa6d39c..e03495a799ce8d034feab58e263177794e706013 100644 (file)
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
-                               snps,dis_metastability_quirk;
                        };
                };
 
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
+                               snps,dis_metastability_quirk;
                        };
                };
 
index df9eca94d812290afe03affd59f76663ea1ab0ee..8a878687197b35a8e056ba55c4aaec56293123e1 100644 (file)
 
        pinctrl_ts: tsgrp {
                fsl,pins = <
-                       MX51_PAD_CSI1_D8__GPIO3_12              0x85
+                       MX51_PAD_CSI1_D8__GPIO3_12              0x04
                        MX51_PAD_CSI1_D9__GPIO3_13              0x85
                >;
        };
index 70483ce72ba6cf648809acb7f24be3af11817674..77f8f030dd0772aba631f57b704a7e60a9bd0532 100644 (file)
@@ -90,7 +90,7 @@
                                        clocks = <&clks IMX6Q_CLK_ECSPI5>,
                                                 <&clks IMX6Q_CLK_ECSPI5>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+                                       dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index 19a075aee19eabfb5ac2b127db45199bca2f0d6a..f14df0baf2ab42867e30f3b93520b75652561afa 100644 (file)
                        dsa,member = <0 0>;
                        eeprom-length = <512>;
                        interrupt-parent = <&gpio6>;
-                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
 
index d8b94f47498b67051ade669f23d2796a0b1e7433..4e4a55aad5c9ca9aa6fff90deb0ae1c5e99c3a13 100644 (file)
                        ranges = <0x81000000 0 0          0x08f80000 0 0x00010000 /* downstream I/O */
                                  0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
-                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
index bdf73cbcec3a5c1df0bb55344049aafa3a3cc404..e7c3c563ff8f5d05bb022ed1c3be513f2e97ff66 100644 (file)
 
                dais = <&mcbsp2_port>, <&mcbsp3_port>;
        };
-};
-
-&dss {
-       status = "okay";
-};
 
-&gpio6 {
        pwm8: dmtimer-pwm-8 {
                pinctrl-names = "default";
                pinctrl-0 = <&vibrator_direction_pin>;
                pwm-names = "enable", "direction";
                direction-duty-cycle-ns = <10000000>;
        };
+};
 
+&dss {
+       status = "okay";
 };
 
 &dsi1 {
index 486d4e7433ed32d2662fabcf9b25fe54eab0f187..b38f8c24055800c45e1e81aef451f08ac9e27be5 100644 (file)
                nand0: nand@ff900000 {
                        #address-cells = <0x1>;
                        #size-cells = <0x1>;
-                       compatible = "denali,denali-nand-dt";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xff900000 0x100000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
                        interrupts = <0x0 0x90 0x4>;
                        dma-mask = <0xffffffff>;
-                       clocks = <&nand_clk>;
+                       clocks = <&nand_x_clk>;
                        status = "disabled";
                };
 
index bead79e4b2aa2b624b8f7d21cef4751d6536b724..791ca15c799eba98850cbc3d4b96be7a509c422f 100644 (file)
                        #size-cells = <0>;
                        reg = <0xffda5000 0x100>;
                        interrupts = <0 102 4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        /*32bit_access;*/
                        tx-dma-channel = <&pdma 16>;
                        rx-dma-channel = <&pdma 17>;
                nand: nand@ffb90000 {
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xffb90000 0x72000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
index 1e9f7af8f70ff6ba23d9403f930f09dd6e0dda7e..3157be413297e5d22ad3174e2082b5199fc3083c 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE)               += dmabounce.o
 obj-$(CONFIG_SHARP_LOCOMO)     += locomo.o
 obj-$(CONFIG_SHARP_PARAM)      += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)      += scoop.o
-obj-$(CONFIG_SMP)              += secure_cntvoff.o
+obj-$(CONFIG_CPU_V7)           += secure_cntvoff.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o     = -pg
index 054591dc9a0020dcdaa907f6b3cded43408075d0..4cd2f4a2bff4e20beb76fd524348aae58fbc3590 100644 (file)
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index f70507ab91eeb1b59a0857cb9e6f55ff2825fe9f..200ebda47e0c3bee90eadd948b6f8f522fcfbedc 100644 (file)
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
 CONFIG_USB_FUNCTIONFS=m
 CONFIG_USB_MASS_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index 7e1c543162c3ab16f11f6be6ccec5a16abae31d0..8f6be19825456496ef471b3b03a78d32354d9736 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_CGROUPS=y
@@ -10,20 +9,10 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMDLINE_PARTITION=y
-CONFIG_ARCH_MULTI_V7=y
-# CONFIG_ARCH_MULTI_V5 is not set
-# CONFIG_ARCH_MULTI_V4 is not set
 CONFIG_ARCH_VIRT=y
 CONFIG_ARCH_ALPINE=y
 CONFIG_ARCH_ARTPEC=y
 CONFIG_MACH_ARTPEC6=y
-CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370=y
-CONFIG_MACH_ARMADA_375=y
-CONFIG_MACH_ARMADA_38X=y
-CONFIG_MACH_ARMADA_39X=y
-CONFIG_MACH_ARMADA_XP=y
-CONFIG_MACH_DOVE=y
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
@@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM_CYGNUS=y
 CONFIG_ARCH_BCM_HR2=y
 CONFIG_ARCH_BCM_NSP=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
 CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_21664=y
 CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_63XX=y
 CONFIG_ARCH_BRCMSTB=y
@@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y
 CONFIG_MACH_BERLIN_BG2CD=y
 CONFIG_MACH_BERLIN_BG2Q=y
 CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_HIGHBANK=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_HI3xxx=y
-CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_HIP01=y
 CONFIG_ARCH_HIP04=y
-CONFIG_ARCH_KEYSTONE=y
-CONFIG_ARCH_MESON=y
+CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_MXC=y
 CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
@@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_IMX6UL=y
 CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MSM8X60=y
 CONFIG_ARCH_MSM8960=y
 CONFIG_ARCH_MSM8974=y
 CONFIG_ARCH_ROCKCHIP=y
-CONFIG_ARCH_SOCFPGA=y
-CONFIG_PLAT_SPEAR=y
-CONFIG_ARCH_SPEAR13XX=y
-CONFIG_MACH_SPEAR1310=y
-CONFIG_MACH_SPEAR1340=y
-CONFIG_ARCH_STI=y
-CONFIG_ARCH_STM32=y
-CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_EMEV2=y
 CONFIG_ARCH_R7S72100=y
@@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_STM32=y
 CONFIG_ARCH_SUNXI=y
-CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_ARCH_UNIPHIER=y
 CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
-CONFIG_TRUSTED_FOUNDATIONS=y
-CONFIG_PCI=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_DRA7XX=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 CONFIG_PCI_EPF_TEST=m
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
-CONFIG_HIGHPTE=y
-CONFIG_CMA=y
 CONFIG_SECCOMP=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
 CONFIG_ARM_ZYNQ_CPUIDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_DSA=m
-CONFIG_NET_SWITCHDEV=y
 CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_DEV=y
 CONFIG_CAN_AT91=m
 CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_RCAR=m
+CONFIG_CAN_SUN4I=y
 CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_RCAR=m
 CONFIG_CAN_MCP251X=y
-CONFIG_NET_DSA_BCM_SF2=m
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_CAN_SUN4I=y
 CONFIG_BT=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_BCM=y
@@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y
 CONFIG_RFKILL_GPIO=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
 CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
@@ -251,14 +221,20 @@ CONFIG_SATA_MV=y
 CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_VIRTIO_NET=y
-CONFIG_HIX5HD2_GMAC=y
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
 CONFIG_SUN4I_EMAC=y
-CONFIG_MACB=y
 CONFIG_BCMGENET=m
 CONFIG_BGMAC_BCMA=y
 CONFIG_SYSTEMPORT=m
+CONFIG_MACB=y
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_GIANFAR=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_E1000E=y
 CONFIG_IGB=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
@@ -268,19 +244,17 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_PLATFORM=y
 CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_REALTEK_PHY=y
 CONFIG_ROCKCHIP_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_RTL8152=m
 CONFIG_USB_LAN78XX=m
@@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_BRCMFMAC=m
-CONFIG_RT2X00=m
-CONFIG_RT2800USB=m
 CONFIG_MWIFIEX=m
 CONFIG_MWIFIEX_SDIO=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_QT1070=m
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_KEYBOARD_TEGRA=y
-CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
-CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=m
 CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
 CONFIG_SERIAL_ATMEL_TTYAT=y
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_MESON=y
 CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
@@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=20
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_SH_SCI_DMA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
 CONFIG_SERIAL_VT8500_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_OMAP=y
 CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_FSL_LPUART=y
@@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y
 CONFIG_SERIAL_STM32=y
 CONFIG_SERIAL_STM32_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_HVC_DRIVER=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_DAVINCI=y
-CONFIG_I2C_MESON=y
-CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=m
 CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
@@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_AT91=m
 CONFIG_I2C_BCM2835=y
 CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DAVINCI=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_DIGICOLOR=m
 CONFIG_I2C_EMEV2=m
 CONFIG_I2C_GPIO=m
-CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_RIIC=y
 CONFIG_I2C_RK3X=y
@@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_PALMAS=y
-CONFIG_PINCTRL_BCM2835=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DAVINCI=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_EM=y
 CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_UNIPHIER=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
-CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_BATTERY_ACT8945A=y
 CONFIG_BATTERY_CPCAP=m
 CONFIG_BATTERY_SBS=y
+CONFIG_AXP20X_POWER=m
 CONFIG_BATTERY_MAX17040=m
 CONFIG_BATTERY_MAX17042=m
 CONFIG_CHARGER_CPCAP=m
@@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m
 CONFIG_CHARGER_MAX8997=m
 CONFIG_CHARGER_MAX8998=m
 CONFIG_CHARGER_TPS65090=y
-CONFIG_AXP20X_POWER=m
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_RMOBILE=y
-CONFIG_POWER_RESET_ST=y
-CONFIG_POWER_AVS=y
-CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
@@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PWM_FAN=m
 CONFIG_SENSORS_INA2XX=m
 CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=m
-CONFIG_BRCMSTB_THERMAL=m
 CONFIG_IMX_THERMAL=y
 CONFIG_ROCKCHIP_THERMAL=y
 CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
-CONFIG_DAVINCI_WATCHDOG=m
-CONFIG_EXYNOS_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
 CONFIG_ST_THERMAL_MEMMAP=y
 CONFIG_WATCHDOG=y
 CONFIG_DA9063_WATCHDOG=m
@@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_ORION_WATCHDOG=y
 CONFIG_RN5T618_WATCHDOG=y
-CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
+CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
-CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_RENESAS_WDT=m
-CONFIG_BCM2835_WDT=y
 CONFIG_BCM47XX_WDT=y
-CONFIG_BCM7038_WDT=m
+CONFIG_BCM2835_WDT=y
 CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
 CONFIG_MFD_ACT8945A=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_MFD_ATMEL_HLCDC=m
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AC100=y
-CONFIG_MFD_AXP20X=y
 CONFIG_MFD_AXP20X_I2C=y
 CONFIG_MFD_AXP20X_RSB=y
 CONFIG_MFD_CROS_EC=m
@@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m
 CONFIG_MFD_MAX8907=y
 CONFIG_MFD_MAX8997=y
 CONFIG_MFD_MAX8998=y
-CONFIG_MFD_RK808=y
 CONFIG_MFD_CPCAP=y
 CONFIG_MFD_PM8XXX=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_RN5T618=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_STMPE=y
@@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
-CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
 CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
@@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_CPCAP=y
 CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_GPIO=y
-CONFIG_MFD_SYSCON=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_LP872X=y
 CONFIG_REGULATOR_MAX14577=m
 CONFIG_REGULATOR_MAX8907=y
@@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_RPM=y
-CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=m
+CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_RN5T618=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
@@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SOC_CAMERA=m
 CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_VIDEO_RCAR_VIN=m
-CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
 CONFIG_VIDEO_S5P_FIMC=m
 CONFIG_VIDEO_S5P_MIPI_CSIS=m
 CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
 CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
@@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
 CONFIG_CEC_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
 # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_DUMB_VGA_DAC=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y
 CONFIG_DRM_SUN4I=m
 CONFIG_DRM_FSL_DCU=m
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_SII9234=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
 CONFIG_DRM_STI=m
-CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4=m
 CONFIG_DRM_ETNAVIV=m
 CONFIG_DRM_MXSFB=m
 CONFIG_FB_ARMCLCD=y
@@ -659,8 +627,6 @@ CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_AS3711=y
@@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_HDA_TEGRA=m
 CONFIG_SND_HDA_INPUT_BEEP=y
 CONFIG_SND_HDA_PATCH_LOADER=y
@@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m
 CONFIG_SND_SOC_ODROID=m
 CONFIG_SND_SOC_SH4_FSI=m
 CONFIG_SND_SOC_RCAR=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_SOC_STI=m
 CONFIG_SND_SUN4I_CODEC=m
 CONFIG_SND_SOC_TEGRA=m
 CONFIG_SND_SOC_TEGRA20_I2S=m
@@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m
 CONFIG_SND_SOC_TEGRA_WM9712=m
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
 CONFIG_SND_SOC_TEGRA_ALC5632=m
-CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_TEGRA_MAX98090=m
 CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_SGTL5000=m
 CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_STI=m
 CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
-CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_XHCI_TEGRA=m
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
-CONFIG_USB_EHCI_EXYNOS=y
-CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_EXYNOS=m
 CONFIG_USB_R8A66597_HCD=m
 CONFIG_USB_RENESAS_USBHS=m
@@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_TUSB_OMAP_DMA=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC2=y
-CONFIG_USB_HSIC_USB3503=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
 CONFIG_AB8500_USB=y
-CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_KEYSTONE_USB_PHY=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
 CONFIG_TWL6030_USB=m
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
 CONFIG_USB_MXS_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_FSL_USB2=y
@@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_PXAV3=y
 CONFIG_MMC_SDHCI_SPEAR=y
-CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_ATMELMCI=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_MVSDIO=y
 CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SH_MMCIF=y
@@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_PALMAS=y
-CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
 CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_DA9063=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_DIGICOLOR=m
-CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_S3C=m
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_AT91RM9200=m
 CONFIG_RTC_DRV_AT91SAM9=m
 CONFIG_RTC_DRV_VT8500=y
-CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_SUNXI=y
 CONFIG_RTC_DRV_MV=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
 CONFIG_AT_HDMAC=y
 CONFIG_AT_XDMAC=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=y
 CONFIG_FSL_EDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_IMX_SDMA=y
 CONFIG_MV_XOR=y
+CONFIG_MXS_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_SIRF_DMA=y
+CONFIG_STE_DMA40=y
+CONFIG_ST_FDMA=m
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_DW_DMAC=y
 CONFIG_SH_DMAE=y
 CONFIG_RCAR_DMAC=y
 CONFIG_RENESAS_USB_DMAC=m
-CONFIG_STE_DMA40=y
-CONFIG_SIRF_DMA=y
-CONFIG_TI_EDMA=y
-CONFIG_PL330_DMA=y
-CONFIG_IMX_SDMA=y
-CONFIG_IMX_DMA=y
-CONFIG_MXS_DMA=y
-CONFIG_DMA_BCM2835=y
-CONFIG_DMA_OMAP=y
-CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_DMA=y
-CONFIG_DMA_SUN6I=y
-CONFIG_ST_FDMA=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
 CONFIG_STAGING=y
-CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_ISL29028=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
-CONFIG_BCMA=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-CONFIG_QCOM_GSBI=y
-CONFIG_QCOM_PM=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD_RPM=y
-CONFIG_QCOM_SMP2P=y
-CONFIG_QCOM_SMSM=y
-CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ROCKCHIP_PM_DOMAINS=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_QCOM_CLK_RPM=y
-CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
-CONFIG_CROS_EC_CHARDEV=m
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_RK808=m
 CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_APQ_MMCC_8084=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_REMOTEPROC=m
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_ARM_TEGRA_DEVFREQ=m
-CONFIG_MEMORY=y
-CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_IIO_SW_TRIGGER=y
@@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m
 CONFIG_XILINX_XADC=y
 CONFIG_MPU3050_I2C=y
 CONFIG_CM36651=m
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_AK8975=y
-CONFIG_RASPBERRYPI_POWER=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=m
 CONFIG_PWM_ATMEL_HLCDC_PWM=m
 CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
 CONFIG_PWM_FSL_FTM=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_RCAR=m
 CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_ROCKCHIP=m
 CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_STI=y
 CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_E1000E=y
-CONFIG_PWM_STI=y
-CONFIG_PWM_BCM2835=y
-CONFIG_PWM_BRCMSTB=m
-CONFIG_PHY_DM816X_USB=m
-CONFIG_OMAP_USB2=y
-CONFIG_TI_PIPE3=y
-CONFIG_TWL4030_USB=m
+CONFIG_PHY_BERLIN_SATA=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_ROCKCHIP_DP=m
 CONFIG_PHY_ROCKCHIP_USB=y
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_STM32_USBPHYC=y
-CONFIG_PHY_SUN4I_USB=y
-CONFIG_PHY_SUN9I_USB=y
-CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_TEGRA_XUSB=y
-CONFIG_PHY_BRCM_SATA=y
-CONFIG_NVMEM=y
+CONFIG_PHY_DM816X_USB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
@@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_UBIFS_FS=y
-CONFIG_TMPFS=y
 CONFIG_SQUASHFS=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
@@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_KEYSTONE_IRQ=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_ST=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_DEV_MARVELL_CESA=m
 CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
 CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
 CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM=m
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
 CONFIG_CRYPTO_SHA1_ARM_CE=m
 CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA256_ARM=m
 CONFIG_CRYPTO_SHA512_ARM=m
 CONFIG_CRYPTO_AES_ARM=m
 CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m
 CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=m
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m
-CONFIG_CRYPTO_DEV_ATMEL_SHA=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_MMIO=y
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
index 3c1e203e53b9ccd752731f228b595a5678557782..57caa742016ed59bc8d3755fd6b9526f0c05f860 100644 (file)
         * Allocate stack space to store 128 bytes worth of tweaks.  For
         * performance, this space is aligned to a 16-byte boundary so that we
         * can use the load/store instructions that declare 16-byte alignment.
+        * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
         */
-       sub             sp, #128
-       bic             sp, #0xf
+       sub             r12, sp, #128
+       bic             r12, #0xf
+       mov             sp, r12
 
 .if \n == 64
        // Load first tweak
index a71f16536b6c178c09334efc3047f4ddcc8da91b..6e41336b0bc4fc71ebaf5f5f4bae7e5e9e1b0395 100644 (file)
@@ -1 +1,4 @@
 obj-$(CONFIG_TRUSTED_FOUNDATIONS)      += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT                := n
index 0cd4dccbae788626ddd277376d1dc78c719a9ff2..b17ee03d280b6ff9ff706f04ab4d3be57f20bfe8 100644 (file)
@@ -460,6 +460,10 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
        adds    \tmp, \addr, #\size - 1
        sbcccs  \tmp, \tmp, \limit
        bcs     \bad
+#ifdef CONFIG_CPU_SPECTRE
+       movcs   \addr, #0
+       csdb
+#endif
 #endif
        .endm
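
The movcs/csdb pair added here clamps the address to zero once the range check
has failed, so a mispredicted branch cannot feed a user-controlled address into
a speculative load. The same idea is exposed at the C level as
array_index_nospec() from <linux/nospec.h>; a minimal sketch with illustrative
names:

        #include <linux/nospec.h>

        static int get_entry(const int *table, unsigned long idx,
                             unsigned long size)
        {
                if (idx >= size)
                        return -EINVAL;
                /* Under misspeculation, clamp idx to 0 so the failed bounds
                 * check cannot be used to read past the table. */
                idx = array_index_nospec(idx, size);
                return table[idx];
        }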
 
index 66d0e215a773cb66d3baaa5a08cbc91056fe00be..f74756641410ecf6a4b261117fc1588b362e7fbe 100644 (file)
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 }
 #define atomic_cmpxchg_relaxed         atomic_cmpxchg_relaxed
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int oldval, newval;
        unsigned long tmp;
@@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
        return oldval;
 }
+#define atomic_fetch_add_unless                atomic_fetch_add_unless
 
 #else /* ARM_ARCH_6 */
 
@@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-
-       c = atomic_read(v);
-       while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-               c = old;
-       return c;
-}
+#define atomic_fetch_andnot            atomic_fetch_andnot
 
 #endif /* __LINUX_ARM_ARCH__ */
 
@@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor)
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_inc(v)          atomic_add(1, v)
-#define atomic_dec(v)          atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
        long long counter;
@@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 
        return result;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+                                                 long long u)
 {
-       long long val;
+       long long oldval, newval;
        unsigned long tmp;
-       int ret = 1;
 
        smp_mb();
        prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 "1:    ldrexd  %0, %H0, [%4]\n"
 "      teq     %0, %5\n"
 "      teqeq   %H0, %H5\n"
-"      moveq   %1, #0\n"
 "      beq     2f\n"
-"      adds    %Q0, %Q0, %Q6\n"
-"      adc     %R0, %R0, %R6\n"
-"      strexd  %2, %0, %H0, [%4]\n"
+"      adds    %Q1, %Q0, %Q6\n"
+"      adc     %R1, %R0, %R6\n"
+"      strexd  %2, %1, %H1, [%4]\n"
 "      teq     %2, #0\n"
 "      bne     1b\n"
 "2:"
-       : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+       : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");
 
-       if (ret)
+       if (oldval != u)
                smp_mb();
 
-       return ret;
+       return oldval;
 }
-
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
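Both conversions above keep the same underlying LL/SC loops but adopt the fetch_* naming: the function returns the value the counter held before the (conditional) add, and the trailing #define advertises to linux/atomic.h that the architecture supplies it, so the generic layer can derive atomic_add_unless(), atomic_inc_not_zero() and the inc/dec families that the deleted #defines used to spell out. The contract, sketched over cmpxchg (this mirrors the removed pre-v6 loop):

/* sketch of the fetch_add_unless contract on top of cmpxchg */
static inline int fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {			/* only add while *v != u */
		int old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			break;			/* exchange succeeded */
		c = old;			/* raced; retry with new value */
	}
	return c;				/* always the pre-add value */
}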
index 4cab9bb823fbbb4511225a695927a9387bce7eec..c92e42a5c8f75d2c03e3fd03cceff245e4da0bc0 100644 (file)
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
 #if __LINUX_ARM_ARCH__ < 5
 
-#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
 #else
 
-static inline int constant_fls(int x)
-{
-       int r = 32;
-
-       if (!x)
-               return 0;
-       if (!(x & 0xffff0000u)) {
-               x <<= 16;
-               r -= 16;
-       }
-       if (!(x & 0xff000000u)) {
-               x <<= 8;
-               r -= 8;
-       }
-       if (!(x & 0xf0000000u)) {
-               x <<= 4;
-               r -= 4;
-       }
-       if (!(x & 0xc0000000u)) {
-               x <<= 2;
-               r -= 2;
-       }
-       if (!(x & 0x80000000u)) {
-               x <<= 1;
-               r -= 1;
-       }
-       return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency.  __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
-       unsigned int ret;
-
-       asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
-       return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
-       if (__builtin_constant_p(x))
-              return constant_fls(x);
-
-       return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
-       return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
-       return fls(x & -x);
-}
-
 /*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
+ * On ARMv5 and above, the gcc built-ins can rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 the result is
+ * even better, as the rbit instruction is used as well.
  */
-static inline unsigned long __ffs(unsigned long x)
-{
-       return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
 
 #endif
 
+#include <asm-generic/bitops/ffz.h>
+
 #include <asm-generic/bitops/fls64.h>
 
 #include <asm-generic/bitops/sched.h>
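The hand-written clz-based helpers are replaced by the asm-generic builtin-* wrappers, which reduce to compiler intrinsics; roughly:

/* roughly what the builtin-* headers expand to */
static __always_inline int fls(int x)
{
	return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}

static __always_inline int ffs(int x)
{
	return __builtin_ffs(x);	/* 0 for x == 0, 1-based otherwise */
}

Note that ffz() is now taken from the generic header in both branches, which is why its include moved below the #endif.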
index 17f1f1a814ff60968fdff9a18005c4448bc66690..38badaae8d9d712fe97ca5ac2f6b6de7198a44ec 100644 (file)
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (false)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
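On 32-bit ARM the stub always runs natively (efi_is_64bit() is false above), so efi_table_attr() is a plain field access through the typed pointer. A hypothetical use (field and variable names illustrative):

/* hypothetical: expands to ((efi_system_table_t *)sys_table_arg)->hdr.revision */
u32 rev = efi_table_attr(efi_system_table, hdr.revision, sys_table_arg);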
 
index e46e4e7bdba39974979b54ab137d13ba90f5d96d..ac54c06764e61e5a3cddbbb5ebacd41cbc2fbfe5 100644 (file)
@@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg,
        asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
+struct perf_event_attr;
 struct notifier_block;
 struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                                  int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index b6f319606e306ad00864e81e3ead98a96cdf76ef..c883fcbe93b67ef68bfc18a6e48d4ec53c37cdd0 100644 (file)
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
 #ifdef CONFIG_SMP
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
index 59655459da591bfb767231d10a8826cc784fd24c..82290f212d8e7d0275a8c379f64a49c074dcdfde 100644 (file)
@@ -44,8 +44,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
        unsigned int kprobe_status;
        struct prev_kprobe prev_kprobe;
-       struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
index 5c1ad11aa39264aee7e9210cbf6747adab443930..bb8851208e1755b2c8eceff2054140f1a9933d01 100644 (file)
@@ -59,7 +59,7 @@ struct machine_desc {
        void                    (*init_time)(void);
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        void                    (*handle_irq)(struct pt_regs *);
 #endif
        void                    (*restart)(enum reboot_mode, const char *);
index 0f79e4dec7f98ccddb8429a1e6b262ca146c0d38..4ac3a019a46fe50c3d73376cfa9f9bfdd028a4d8 100644 (file)
@@ -13,7 +13,6 @@
 extern void timer_tick(void);
 
 typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
-                                    clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
 
 #endif
index 1e5b9bb9227066a498c6d6edb6dfe238e53d6c99..991c9127c6501661bff013eaa2675bacbaf49876 100644 (file)
@@ -51,7 +51,6 @@ struct arch_probes_insn {
  * We assume one instruction can consume at most 64 bytes of stack, which
  * is 'push {r0-r15}'. Instructions that consume more or an unknown amount
  * of stack, like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', must not be probed.
- * Both kprobe and jprobe use this macro.
  */
 #define MAX_STACK_SIZE                 64
 
index e71cc35de16335af87a878abaf5530166111a8fd..9b37b6ab27fe052eb8225c811688700c6ae99bb3 100644 (file)
@@ -123,8 +123,8 @@ struct user_vfp_exc;
 
 extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
                                           struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
-                                   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+                                   struct user_vfp_exc *);
 #endif
 
 /*
index d5562f9ce60079139d360e5d6afac59469051454..f854148c8d7c258927b031d0c87e8aa8a142e309 100644 (file)
@@ -292,5 +292,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 {
 }
 
+static inline void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+}
+
+static inline void tlb_flush_remove_tables_local(void *arg)
+{
+}
+
 #endif /* CONFIG_MMU */
 #endif
index 3d614e90c19f307ac04b3e625c52b6bc80b66da7..5451e1f05a193c002f3b30af921975a28c232833 100644 (file)
@@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
                : "cc"); \
        flag; })
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+       __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
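A quick illustration of what __inttype() selects on 32-bit ARM, where unsigned long is 32 bits (a sketch):

long long v64;		/* sizeof == 8 > sizeof(0UL) */
int v32;		/* sizeof == 4               */

__inttype(v64) r64;	/* unsigned long long */
__inttype(v32) r32;	/* unsigned long      */

This matters for the register variable changed below: r2 must be declared with a type wide enough to hold a 64-bit get_user() result.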
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register typeof(*(p)) __user *__p asm("r0") = (p);      \
-               register typeof(x) __r2 asm("r2");                      \
+               register __inttype(x) __r2 asm("r2");                   \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                unsigned int __ua_flags = uaccess_save_and_enable();    \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
 #define user_addr_max() \
        (uaccess_kernel() ? ~0UL : get_fs())
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there.  Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x, ptr, err)                                  \
-({                                                                     \
-       __get_user_err((x), (ptr), err);                                \
-       (void) 0;                                                       \
-})
-
 #define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
@@ -324,6 +335,7 @@ do {                                                                        \
 
 #define __get_user_asm_word(x, addr, err)                      \
        __get_user_asm(x, addr, err, ldr)
+#endif
 
 
 #define __put_user_switch(x, ptr, __err, __fn)                         \
index 179a9f6bd1e31c63564fd3e67444d41916939617..e85a3af9ddeb5694b793363f8245ba1ad5f99899 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 #include <mach/entry-macro.S>
 #endif
 #include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
  * Interrupt handling.
  */
        .macro  irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        ldr     r1, =handle_arch_irq
        mov     r0, sp
        badr    lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
        .globl  cr_alignment
 cr_alignment:
        .space  4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-       .globl  handle_arch_irq
-handle_arch_irq:
-       .space  4
-#endif
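The handle_arch_irq storage and its setter move out of arch code; under CONFIG_GENERIC_IRQ_MULTI_HANDLER the generic kernel/irq code provides them, roughly like this:

/* roughly the generic replacement in kernel/irq/handle.c */
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;

int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return -EBUSY;		/* first registration wins */

	handle_arch_irq = handle_irq;
	return 0;
}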
index 106a1466518d0a24f29ee64ec364f8f1d1034d62..746565a876dcdd362522d1e546c8404faacfbfe2 100644 (file)
@@ -48,6 +48,7 @@ saved_pc      .req    lr
  * from those features make this path too inefficient.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq_notrace                     @ disable interrupts
@@ -78,6 +79,7 @@ fast_work_pending:
  * call.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
@@ -255,7 +257,7 @@ local_restart:
        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace
 
-       invoke_syscall tbl, scno, r10, ret_fast_syscall
+       invoke_syscall tbl, scno, r10, __ret_fast_syscall
 
        add     r1, sp, #S_OFF
 2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
index dd546d65a3830d819a48fc1463d55d1cc2110c18..ec29de2500764e11ad839bda165c135f12b6a6e5 100644 (file)
@@ -53,7 +53,11 @@ ENTRY(stext)
  THUMB(1:                      )
 #endif
 
-       setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+       bl      __hyp_stub_install
+#endif
+       @ ensure svc mode and all interrupts masked
+       safe_svcmode_maskall r9
                                                @ and irqs disabled
 #if defined(CONFIG_CPU_CP15)
        mrc     p15, 0, r9, c0, c0              @ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
         * the processor type - there is no need to check the machine type
         * as it has already been validated by the primary processor.
         */
-       setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+       bl      __hyp_stub_install_secondary
+#endif
+       safe_svcmode_maskall r9
+
 #ifndef CONFIG_CPU_CP15
        ldr     r9, =CONFIG_PROCESSOR_ID
 #else
@@ -177,7 +185,7 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #CR_I
 #endif
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
-       isb
+       instr_sync
 #elif defined (CONFIG_CPU_V7M)
 #ifdef CONFIG_ARM_MPU
        ldreq   r3, [r12, MPU_CTRL]
index 629e25152c0d03e27593e22f9326db7507933c55..1d5fbf1d1c675770dde85c75912b692827375e1c 100644 (file)
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
 /*
  * Check whether bp virtual address is in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->ctrl.len);
+       va = hw->address;
+       len = get_hbp_len(hw->ctrl.len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 /*
  * Construct an arch_hw_breakpoint from a perf_event.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+               hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->ctrl.type = ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_8;
-               if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+               if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
                        && max_watchpoint_len >= 8)
                        break;
        default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
         * by the hardware and must be aligned to the appropriate number of
         * bytes.
         */
-       if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
-           info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
-           info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+       if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+           hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+           hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                return -EINVAL;
 
        /* Address */
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /* Privilege */
-       info->ctrl.privilege = ARM_BREAKPOINT_USER;
-       if (arch_check_bp_in_kernelspace(bp))
-               info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+       hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+       if (arch_check_bp_in_kernelspace(hw))
+               hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
 
        /* Enabled? */
-       info->ctrl.enabled = !bp->attr.disabled;
+       hw->ctrl.enabled = !attr->disabled;
 
        /* Mismatch */
-       info->ctrl.mismatch = 0;
+       hw->ctrl.mismatch = 0;
 
        return 0;
 }
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings.
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret = 0;
        u32 offset, alignment_mask = 0x3;
 
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                return -ENODEV;
 
        /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                goto out;
 
        /* Check address alignment. */
-       if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
-       offset = info->address & alignment_mask;
+       offset = hw->address & alignment_mask;
        switch (offset) {
        case 0:
                /* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        case 1:
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
        case 3:
                /* Allow single byte watchpoint. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                        break;
        default:
                ret = -EINVAL;
                goto out;
        }
 
-       info->address &= ~alignment_mask;
-       info->ctrl.len <<= offset;
+       hw->address &= ~alignment_mask;
+       hw->ctrl.len <<= offset;
 
        if (is_default_overflow_handler(bp)) {
                /*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                        return -EINVAL;
 
                /* We don't allow mismatch breakpoints in kernel space. */
-               if (arch_check_bp_in_kernelspace(bp))
+               if (arch_check_bp_in_kernelspace(hw))
                        return -EPERM;
 
                /*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                 * reports them.
                 */
                if (!debug_exception_updates_fsr() &&
-                   (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
-                    info->ctrl.type == ARM_BREAKPOINT_STORE))
+                   (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+                    hw->ctrl.type == ARM_BREAKPOINT_STORE))
                        return -EINVAL;
        }
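The reworked hooks let the hw_breakpoint core build and validate an arch_hw_breakpoint from a candidate perf_event_attr without mutating the live event. A sketch of a caller under the new prototypes (function name hypothetical):

/* sketch: how the generic layer drives the new hooks for an event 'bp' */
static int validate_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);

	if (!err && arch_check_bp_in_kernelspace(&hw))
		pr_debug("kernel-space breakpoint at 0x%x\n", hw.address);
	return err;
}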
 
index ece04a457486c5998d312bce4f3c69b97e0e7b64..9908dacf9229fbfa694ceebdfb2ed1b534c3f522 100644 (file)
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
        uniphier_cache_init();
 }
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-#endif
-
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
index 225d1c58d2de98d5c4a92de4905052203f1e25b6..d9c2991331111617cc3fce04f38fe17fd1a78f6d 100644 (file)
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
 
 static int __init gate_vma_init(void)
 {
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        return 0;
 }
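gate_vma is a static struct vm_area_struct, so it never passes through the normal vma allocation path; vma_init() fills in the fields the VM expects to be valid. Roughly what the new helper does (see include/linux/mm.h in this series):

/* roughly vma_init(): give a bare VMA sane vm_mm, vm_ops and chain state */
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;	/* keep vm_ops non-NULL */
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}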
index 35ca494c028cc841e1077bf59428b11d9d817fbb..4c249cb261f3913112792cd6cad0a7e2df17ff4f 100644 (file)
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_crashkernel();
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
 
index f09e9d66d605f4159990ad044cfd29486d621d20..b8f766cf3a905d5c226f8caa2f0cc367b73671f6 100644 (file)
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 
 static int restore_vfp_context(char __user **auxp)
 {
-       struct vfp_sigframe __user *frame =
-               (struct vfp_sigframe __user *)*auxp;
-       unsigned long magic;
-       unsigned long size;
-       int err = 0;
-
-       __get_user_error(magic, &frame->magic, err);
-       __get_user_error(size, &frame->size, err);
+       struct vfp_sigframe frame;
+       int err;
 
+       err = __copy_from_user(&frame, *auxp, sizeof(frame));
        if (err)
-               return -EFAULT;
-       if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+               return err;
+
+       if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
                return -EINVAL;
 
-       *auxp += size;
-       return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+       *auxp += sizeof(frame);
+       return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
 }
 
 #endif
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
 
 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 {
+       struct sigcontext context;
        char __user *aux;
        sigset_t set;
        int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        if (err == 0)
                set_current_blocked(&set);
 
-       __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-       __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-       __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-       __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-       __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-       __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-       __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-       __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-       __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-       __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-       __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-       __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-       __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-       __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-       __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-       __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-       __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+       err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+       if (err == 0) {
+               regs->ARM_r0 = context.arm_r0;
+               regs->ARM_r1 = context.arm_r1;
+               regs->ARM_r2 = context.arm_r2;
+               regs->ARM_r3 = context.arm_r3;
+               regs->ARM_r4 = context.arm_r4;
+               regs->ARM_r5 = context.arm_r5;
+               regs->ARM_r6 = context.arm_r6;
+               regs->ARM_r7 = context.arm_r7;
+               regs->ARM_r8 = context.arm_r8;
+               regs->ARM_r9 = context.arm_r9;
+               regs->ARM_r10 = context.arm_r10;
+               regs->ARM_fp = context.arm_fp;
+               regs->ARM_ip = context.arm_ip;
+               regs->ARM_sp = context.arm_sp;
+               regs->ARM_lr = context.arm_lr;
+               regs->ARM_pc = context.arm_pc;
+               regs->ARM_cpsr = context.arm_cpsr;
+       }
 
        err |= !valid_user_regs(regs);
 
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /*
         * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(regs);
+                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
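The motivation for the bulk copies above: with CONFIG_CPU_SPECTRE, __get_user() now maps to the fully checked get_user(), so seventeen field-wise reads would mean seventeen range checks. Copying the whole sigcontext once keeps a single check and a single uaccess window. The shape of the pattern (a sketch):

/* one checked bulk copy, then plain kernel-side stores */
struct sigcontext ctx;

if (__copy_from_user(&ctx, &sf->uc.uc_mcontext, sizeof(ctx)))
	return -EFAULT;			/* one range check covers all fields */

regs->ARM_r0 = ctx.arm_r0;		/* ... and so on for each register */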
index 1df21a61e379e1a0fe66e784c7a1212814b100da..f0dd4b6ebb6330e5007e883d2ba2d099a4e1df94 100644 (file)
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
                return -ENOMEM;
        err = 0;
        for (i = 0; i < nsops; i++) {
-               __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
-               __get_user_error(sops[i].sem_op,  &tsops->sem_op,  err);
-               __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+               struct oabi_sembuf osb;
+               err |= __copy_from_user(&osb, tsops, sizeof(osb));
+               sops[i].sem_num = osb.sem_num;
+               sops[i].sem_op = osb.sem_op;
+               sops[i].sem_flg = osb.sem_flg;
                tsops++;
        }
        if (timeout) {
index cf2701cb0de8c67b605a19bc2f21bc1aa34daad2..078b259ead4ef617fd0c36d43174beedd5eae033 100644 (file)
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
 }
 
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
        __read_persistent_clock(ts);
 }
 
-void read_boot_clock64(struct timespec64 *ts)
-{
-       __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
-                                    clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
 {
        /* Only allow the clockaccess functions to be registered once */
-       if (__read_persistent_clock == dummy_clock_access &&
-           __read_boot_clock == dummy_clock_access) {
-               if (read_boot)
-                       __read_boot_clock = read_boot;
+       if (__read_persistent_clock == dummy_clock_access) {
                if (read_persistent)
                        __read_persistent_clock = read_persistent;
-
                return 0;
        }
 
index 7a4b060490012dd29f8a6d9fb8e24dfa58896bd1..a826df3d3814bfef44d6e6f71b38c8131d9fe25b 100644 (file)
        .text
 
 ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+       get_thread_info r3
+       ldr     r3, [r3, #TI_ADDR_LIMIT]
+       adds    ip, r1, r2      @ ip=addr+size
+       sub     r3, r3, #1      @ addr_limit - 1
+       cmpcc   ip, r3          @ if (addr+size > addr_limit - 1)
+       movcs   r1, #0          @ addr = NULL
+       csdb
+#endif
 
 #include "copy_template.S"
 
index c46a728df44ead2a0b986a1f218e3c61e9820c34..25aac6ee2ab18cdd0189c27bac759efc02f2d1c5 100644 (file)
@@ -20,6 +20,7 @@ config ARCH_BCM_IPROC
        select GPIOLIB
        select ARM_AMBA
        select PINCTRL
+       select PCI_DOMAINS if PCI
        help
          This enables support for systems based on Broadcom IPROC architected SoCs.
          The IPROC complex contains one or more ARM CPUs along with common
index e22fb40e34bc55be6dd807de63fb9cd009107916..6d5beb11bd965a805107328d2522144f4b857f9a 100644 (file)
@@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
                            GPIO_ACTIVE_LOW),
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
-                           GPIO_ACTIVE_LOW),
+                           GPIO_ACTIVE_HIGH),
        },
 };
 
index 69df3620eca5ce1720f88ab86cf5a36df891d7e7..1c73694c871ad8289b572056d5c3727f3ee22eb2 100644 (file)
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose (GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High Security (HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+       u32 acr, acr_mask;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+       /*
+        * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+        */
+       acr_mask = BIT(0);
+
+       /* If it is already set, skip the expensive smc call */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
                set_cntfreq();
                /* Configure ACR to disable streaming WA for 801819 */
                omap5_erratum_workaround_801819();
+               /* Enable ACR to allow for ICIALLU workaround */
+               omap5_secondary_harden_predictor();
        }
 
        /*
index 9c10248fadccc2d03ef3b3bcbddbe0b43347f158..4e8c2116808ecf3d36d36653184dc89d2941885e 100644 (file)
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                __raw_writel(saved_icmr[i], base + ICMR);
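The old loops silently dropped the final, partial bank of interrupt registers whenever pxa_internal_irq_nr was not a multiple of 32; DIV_ROUND_UP() (from linux/kernel.h) rounds the bank count up instead. For example:

#include <linux/kernel.h>	/* DIV_ROUND_UP(n, d): (((n) + (d) - 1) / (d)) */

/* e.g. a machine with 40 internal IRQs, banked 32 per register: */
int banks_old = 40 / 32;		/* == 1: the second bank is missed  */
int banks_new = DIV_ROUND_UP(40, 32);	/* == 2: both banks are saved/restored */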
index 39aef4876ed41346b81ef0d79d4657e8b93fd208..04b2f22c2739abeb414708be209736ea6a571989 100644 (file)
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
  */
 static void ecard_init_pgtables(struct mm_struct *mm)
 {
-       struct vm_area_struct vma;
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
 
        /* We want to set up the page tables for the following mapping:
         *  Virtual     Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
        memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
-       vma.vm_flags = VM_EXEC;
-       vma.vm_mm = mm;
-
        flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
        flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
 }
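TLB_FLUSH_VMA() is the companion helper added for on-stack VMAs that exist only to parameterize a TLB flush; it initializes just the two fields the flush code reads, so the rest of the structure can stay zeroed. Roughly:

/* roughly the helper used above (asm-generic/tlb.h in this series) */
#define TLB_FLUSH_VMA(mm, flags)  { .vm_mm = (mm), .vm_flags = (flags) }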
index d0f62eacf59da510388dd206d2673eb1b6aa84e3..4adb901dd5ebdd99f2a2b747c1bc237e123a87de 100644 (file)
@@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA
        select HAVE_ARM_SCU
        select HAVE_ARM_TWD if SMP
        select MFD_SYSCON
+       select PCI_DOMAINS if PCI
 
 if ARCH_SOCFPGA
 config SOCFPGA_SUSPEND
index 96a7b6cf459bafa658528c9900434ef71a644bd3..b169e580bf8298026c8ae5791193adef3bbef342 100644 (file)
@@ -702,7 +702,6 @@ config ARM_THUMBEE
 
 config ARM_VIRT_EXT
        bool
-       depends on MMU
        default y if CPU_V7
        help
          Enable the kernel to make use of the ARM Virtualization
index c186474422f3fb25cb809a6d0bff48f476ef8595..0cc8e04295a40dc1d16f308396afdfb7540aa48c 100644 (file)
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
        return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+       kernel_set_to_readonly = 1;
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
 }
 
 void set_kernel_text_rw(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                                current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
 }
index 5dd6c58d653b2d0a6be9522a4849eefa1f19c6b0..7d67c70bbded38ce20cf4216fbf7ab9579542f76 100644 (file)
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
 {
        /* Check CPUID Identification Scheme before ID_PFR1 read */
        if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
-               return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+               return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+                       cpuid_feature_extract(CPUID_EXT_PFR1, 20);
        return 0;
 }
 
index 8015ad434a4029f2a1fd8f738f26a587e1a42332..24101925fe64ebde9301bdd60bcb3a79cfa89623 100644 (file)
@@ -11,7 +11,7 @@
 void __init tcm_init(void);
 #else
 /* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
 {
 }
 #endif
index 6e8b7161303936908b3b2b7adfced5d17de379ce..f6a62ae44a65b61e162203ad261a7fbb5d4b34cf 100644 (file)
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                /* there are 2 passes here */
                bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-       set_memory_ro((unsigned long)header, header->pages);
+       bpf_jit_binary_lock_ro(header);
        prog->bpf_func = (void *)ctx.target;
        prog->jited = 1;
        prog->jited_len = image_size;
index 2438b96004c1c36013cb8e55fe2fb4b2eb180663..fcc5bfec8bd1eff7a36eb2b046de4d5d227f24be 100644 (file)
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
        }
 
        sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
-       register_persistent_clock(NULL, omap_read_persistent_clock64);
+       register_persistent_clock(omap_read_persistent_clock64);
        pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
 
        return 0;
index e90cc8a08186c03af0cd44827d4b77e978436f90..f8bd523d64d15aa5ce3d8ba11730b9b850da230e 100644 (file)
@@ -47,9 +47,6 @@
                           (unsigned long)(addr) +      \
                           (size))
 
-/* Used as a marker in ARM_pc to note when we're in a jprobe. */
-#define JPROBE_MAGIC_ADDR              0xffffffff
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                break;
                        case KPROBE_REENTER:
                                /* A nested probe was hit in FIQ, it is a BUG */
-                               pr_warn("Unrecoverable kprobe detected at %p.\n",
-                                       p->addr);
+                               pr_warn("Unrecoverable kprobe detected.\n");
+                               dump_kprobe(p);
                                /* fall through */
                        default:
                                /* impossible cases */
@@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 
                        /*
                         * If we have no pre-handler or it returned 0, we
-                        * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * continue with normal processing. If we have a
+                        * pre-handler and it returned non-zero, it has
+                        * changed the execution path, so there is no need
+                        * to single-step. Just reset current kprobe and exit.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                kcb->kprobe_status = KPROBE_HIT_SS;
@@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                        p->post_handler(p, regs, 0);
                                }
-                               reset_current_kprobe();
-                       }
-               }
-       } else if (cur) {
-               /* We probably hit a jprobe.  Call its break handler. */
-               if (cur->break_handler && cur->break_handler(cur, regs)) {
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       singlestep(cur, regs, kcb);
-                       if (cur->post_handler) {
-                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-                               cur->post_handler(cur, regs, 0);
                        }
+                       reset_current_kprobe();
                }
-               reset_current_kprobe();
        } else {
                /*
                 * The probe was removed and a race is in progress.
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
        regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long sp_addr = regs->ARM_sp;
-       long cpsr;
-
-       kcb->jprobe_saved_regs = *regs;
-       memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
-       regs->ARM_pc = (long)jp->entry;
-
-       cpsr = regs->ARM_cpsr | PSR_I_BIT;
-#ifdef CONFIG_THUMB2_KERNEL
-       /* Set correct Thumb state in cpsr */
-       if (regs->ARM_pc & 1)
-               cpsr |= PSR_T_BIT;
-       else
-               cpsr &= ~PSR_T_BIT;
-#endif
-       regs->ARM_cpsr = cpsr;
-
-       preempt_disable();
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       __asm__ __volatile__ (
-               /*
-                * Setup an empty pt_regs. Fill SP and PC fields as
-                * they're needed by longjmp_break_handler.
-                *
-                * We allocate some slack between the original SP and start of
-                * our fabricated regs. To be precise we want to have worst case
-                * covered which is STMFD with all 16 regs so we allocate 2 *
-                * sizeof(struct_pt_regs)).
-                *
-                * This is to prevent any simulated instruction from writing
-                * over the regs when they are accessing the stack.
-                */
-#ifdef CONFIG_THUMB2_KERNEL
-               "sub    r0, %0, %1              \n\t"
-               "mov    sp, r0                  \n\t"
-#else
-               "sub    sp, %0, %1              \n\t"
-#endif
-               "ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
-               "str    %0, [sp, %2]            \n\t"
-               "str    r0, [sp, %3]            \n\t"
-               "mov    r0, sp                  \n\t"
-               "bl     kprobe_handler          \n\t"
-
-               /*
-                * Return to the context saved by setjmp_pre_handler
-                * and restored by longjmp_break_handler.
-                */
-#ifdef CONFIG_THUMB2_KERNEL
-               "ldr    lr, [sp, %2]            \n\t" /* lr = saved sp */
-               "ldrd   r0, r1, [sp, %5]        \n\t" /* r0,r1 = saved lr,pc */
-               "ldr    r2, [sp, %4]            \n\t" /* r2 = saved psr */
-               "stmdb  lr!, {r0, r1, r2}       \n\t" /* push saved lr and */
-                                                     /* rfe context */
-               "ldmia  sp, {r0 - r12}          \n\t"
-               "mov    sp, lr                  \n\t"
-               "ldr    lr, [sp], #4            \n\t"
-               "rfeia  sp!                     \n\t"
-#else
-               "ldr    r0, [sp, %4]            \n\t"
-               "msr    cpsr_cxsf, r0           \n\t"
-               "ldmia  sp, {r0 - pc}           \n\t"
-#endif
-               :
-               : "r" (kcb->jprobe_saved_regs.ARM_sp),
-                 "I" (sizeof(struct pt_regs) * 2),
-                 "J" (offsetof(struct pt_regs, ARM_sp)),
-                 "J" (offsetof(struct pt_regs, ARM_pc)),
-                 "J" (offsetof(struct pt_regs, ARM_cpsr)),
-                 "J" (offsetof(struct pt_regs, ARM_lr))
-               : "memory", "cc");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
-       long orig_sp = regs->ARM_sp;
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-
-       if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
-               if (orig_sp != stack_addr) {
-                       struct pt_regs *saved_regs =
-                               (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
-                       printk("current sp %lx does not match saved sp %lx\n",
-                              orig_sp, stack_addr);
-                       printk("Saved registers for jprobe %p\n", jp);
-                       show_regs(saved_regs);
-                       printk("Current registers\n");
-                       show_regs(regs);
-                       BUG();
-               }
-               *regs = kcb->jprobe_saved_regs;
-               memcpy((void *)stack_addr, kcb->jprobes_stack,
-                      MIN_STACK_SIZE(stack_addr));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 {
        return 0;
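jprobes (setjmp_pre_handler/jprobe_return/longjmp_break_handler and the break_handler path above) are removed tree-wide this cycle; the remaining way to hook a function entry is a plain kprobe whose pre-handler reads the arguments out of pt_regs. A minimal sketch (symbol name illustrative):

#include <linux/kprobes.h>

static int entry_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* on 32-bit ARM the first argument arrives in r0 */
	pr_info("%s entered, arg0=%lx\n", p->symbol_name, regs->ARM_r0);
	return 0;	/* 0: continue with normal single-stepping */
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* illustrative target */
	.pre_handler	= entry_pre,
};

/* register_kprobe(&kp) from module init; unregister_kprobe(&kp) on exit */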
index 14db14152909c9942978c370b3d8230bc5ae4508..cc237fa9b90fbd176faf9d6f04a5a74b42a9a234 100644 (file)
@@ -1461,7 +1461,6 @@ fail:
        print_registers(&result_regs);
 
        if (mem) {
-               pr_err("current_stack=%p\n", current_stack);
                pr_err("expected_memory:\n");
                print_memory(expected_memory, mem_size);
                pr_err("result_memory:\n");
index a81404c09d5d11b8d8d8dc6d8047eaec25316641..94516c40ebd35c8e9200b6dd9de69429efc0b8a3 100644 (file)
@@ -8,8 +8,5 @@
 # asflags-y := -DDEBUG
 
 KBUILD_AFLAGS  :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS                +=--no-warn-mismatch
 
-obj-y                  += vfp.o
-
-vfp-$(CONFIG_VFP)      += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y          += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
index 35d0f823e8239f99f29596bc9805c9ab0bae467f..dc7e6b50ef674839a21480c69e12bec144d19194 100644 (file)
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 }
 
 /* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
-                            struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
 {
        struct thread_info *thread = current_thread_info();
        struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
        unsigned long fpexc;
-       int err = 0;
 
        /* Disable VFP to avoid corrupting the new thread state. */
        vfp_flush_hwstate(thread);
@@ -611,17 +609,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
         * Copy the floating point registers. There can be unused
         * registers; see asm/hwcap.h for details.
         */
-       err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
-                               sizeof(hwstate->fpregs));
+       memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
        /*
         * Copy the status and control register.
         */
-       __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+       hwstate->fpscr = ufp->fpscr;
 
        /*
         * Sanitise and restore the exception registers.
         */
-       __get_user_error(fpexc, &ufp_exc->fpexc, err);
+       fpexc = ufp_exc->fpexc;
 
        /* Ensure the VFP is enabled. */
        fpexc |= FPEXC_EN;
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
        fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
        hwstate->fpexc = fpexc;
 
-       __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-       __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+       hwstate->fpinst = ufp_exc->fpinst;
+       hwstate->fpinst2 = ufp_exc->fpinst2;
 
-       return err ? -EFAULT : 0;
+       return 0;
 }
 
 /*
index 8073625371f5d22defae1efe6322a717201e8010..07060e5b58641cc008f41aa927c2e6043ab6afbb 100644 (file)
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 static __read_mostly unsigned int xen_events_irq;
 
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
        xen_setup_features();
 
        if (xen_feature(XENFEAT_dom0))
-               xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
-       else
-               xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+               xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 
        if (!console_set_on_cmdline && !xen_initial_domain())
                add_preferred_console("hvc", 0, NULL);
index 42c090cf02927283ccb2dc597e30205a11050ecb..3d1011957823108d5b988b983fe8225423cc7060 100644 (file)
@@ -74,6 +74,7 @@ config ARM64
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
@@ -264,9 +265,6 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index 45272266dafb64a1fda433e7f557bf11b89e908e..e7101b19d5902775bf0a2f951a866f1abcf614b1 100644 (file)
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=-p --no-undefined -X
+LDFLAGS_vmlinux        :=--no-undefined -X
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS                :=-9
 
@@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 AS             += -EB
-LD             += -EB
-LDFLAGS                += -maarch64linuxb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS                += -EB -maarch64linuxb
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 AS             += -EL
-LD             += -EL
-LDFLAGS                += -maarch64linux
+LDFLAGS                += -EL -maarch64linux # See comment above
 UTS_MACHINE    := aarch64
 endif
 
index e6b059378dc04784927a9b996f24213685bf406a..67dac595dc72ebdeffcd5b6bffd50d115cce8cbc 100644 (file)
                        interrupts = <0 99 4>;
                        resets = <&rst SPIM0_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
                        interrupts = <0 100 4>;
                        resets = <&rst SPIM1_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
index 4b3331fbfe39d7b81d9466fb718975b6265c8e5e..dff9b15eb3c0b63a70c65070c465305c35985dee 100644 (file)
 
 &ethmac {
        status = "okay";
-       phy-mode = "rgmii";
        pinctrl-0 = <&eth_rgmii_y_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+       phy-mode = "rgmii";
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       /* Realtek RTL8211F (0x001cc916) */
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &uart_A {
index fee87737a201f1121fe7a3ad3cd70c1d20415a0d..67d7115e4effbde75173aa4a4c07ae890b3183c5 100644 (file)
 
                        sd_emmc_b: sd@5000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x5000 0x0 0x2000>;
+                               reg = <0x0 0x5000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_B>,
 
                        sd_emmc_c: mmc@7000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x7000 0x0 0x2000>;
+                               reg = <0x0 0x7000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_C>,
index 3c31e21cbed7fcdde5bbdf030fcd6c194be5033d..b8dc4dbb391b669fc13eb13b1a24f01d24ab252f 100644 (file)
                        no-map;
                };
 
+               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved_alt: secmon@5000000 {
+                       reg = <0x0 0x05000000 0x0 0x300000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
 
                        sd_emmc_a: mmc@70000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x70000 0x0 0x2000>;
+                               reg = <0x0 0x70000 0x0 0x800>;
                                interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_b: mmc@72000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x72000 0x0 0x2000>;
+                               reg = <0x0 0x72000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_c: mmc@74000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x74000 0x0 0x2000>;
+                               reg = <0x0 0x74000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index eb327664a4d8c38c196b7cec2dbbe5e5ac2c147f..6aaafff674f97f56625c2da8ea6a5b7dd10eb2d8 100644 (file)
@@ -6,7 +6,7 @@
 
 &apb {
        mali: gpu@c0000 {
-               compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+               compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
                reg = <0x0 0xc0000 0x0 0x40000>;
                interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
index 3e3eb31748a35a7790a9dc90e56971f004660298..f63bceb88caafa249d84de963c3daa034fb842b7 100644 (file)
 
        bus-width = <4>;
        cap-sd-highspeed;
-       sd-uhs-sdr12;
-       sd-uhs-sdr25;
-       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
index 0cfd701809dec578ac31f5f68a7fcfbc21822619..a1b31013ab6e3494d810619fadf81752a67b94f4 100644 (file)
 &usb0 {
        status = "okay";
 };
+
+&usb2_phy0 {
+       /*
+        * HDMI_5V is also used as supply for the USB VBUS.
+        */
+       phy-supply = <&hdmi_5v>;
+};
index 27538eea547b19a0fe8c14a97de4aa303ba63978..c87a80e9bcc6a80bc0f8a59c43a32d6485facafe 100644 (file)
 / {
        compatible = "amlogic,meson-gxl";
 
-       reserved-memory {
-               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
-               secmon_reserved_alt: secmon@5000000 {
-                       reg = <0x0 0x05000000 0x0 0x300000>;
-                       no-map;
-               };
-       };
-
        soc {
                usb0: usb@c9000000 {
                        status = "disabled";
index 4a2a6af8e752dbbe3a17fa02861fb3603d7c44cb..4057197048dcbbacaee733c6067cc677fd1ad54d 100644 (file)
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <4>;
 
                        reg = <0x66080000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x660b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index eb6f08cdbd796c3d764393f9e2e70db2129b0e28..77efa28c4dd53db718b22e64569385f6d92c2feb 100644 (file)
        enet-phy-lane-swap;
 };
 
+&sdio0 {
+       mmc-ddr-1_8v;
+};
+
 &uart2 {
        status = "okay";
 };
index 5084b037320fd9cb65133ca929517062a245af3b..55ba495ef56e1f54b518483bc9e5369fcb03b441 100644 (file)
@@ -42,3 +42,7 @@
 &gphy0 {
        enet-phy-lane-swap;
 };
+
+&sdio0 {
+       mmc-ddr-1_8v;
+};
index 99aaff0b6d72b6bc971863411b80caa3dd165048..b203152ad67ca18b4421bb035b2d13d32d7f9be5 100644 (file)
                        reg = <0x000b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x000e0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index c6999624ed8abdcf4a7f8cea12635eb0606e6432..68c5a6c819aef2c3fbe8b59aac695cfb0c3a3a77 100644 (file)
        vmmc-supply = <&wlan_en>;
        ti,non-removable;
        non-removable;
+       cap-power-off-card;
+       keep-power-in-suspend;
        #address-cells = <0x1>;
        #size-cells = <0x0>;
        status = "ok";
index edb4ee0b8896b2c9a5572e1160e273eac42e062d..7f12624f6c8e8c6af0a3900f3e7d703a969c6c0e 100644 (file)
                dwmmc_2: dwmmc2@f723f000 {
                        bus-width = <0x4>;
                        non-removable;
+                       cap-power-off-card;
+                       keep-power-in-suspend;
                        vmmc-supply = <&reg_vdd_3v3>;
                        mmc-pwrseq = <&wl1835_pwrseq>;
 
index 7dabe25f6774827fd08ec78b3f3793e5b5658177..1c6ff8197a88b1f890fed5b592b9358986942145 100644 (file)
 
                CP110_LABEL(icu): interrupt-controller@1e0000 {
                        compatible = "marvell,cp110-icu";
-                       reg = <0x1e0000 0x10>;
+                       reg = <0x1e0000 0x440>;
                        #interrupt-cells = <3>;
                        interrupt-controller;
                        msi-parent = <&gicp>;
index 0f829db33efe2dfa2735a7cdf570de77c49a6356..4d5ef01f43a331c456eddf1a324f1e1d450bcea5 100644 (file)
@@ -75,7 +75,7 @@
 
                serial@75b1000 {
                        label = "LS-UART0";
-                       status = "okay";
+                       status = "disabled";
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&blsp2_uart2_4pins_default>;
                        pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
index 650f356f69ca748f0fbef0c52f4026e43f511e46..c2625d15a8c08f535e6f00f5c8228212c4d7ec5f 100644 (file)
 
                                port@0 {
                                        reg = <0>;
-                                       etf_out: endpoint {
+                                       etf_in: endpoint {
                                                slave-mode;
                                                remote-endpoint = <&funnel0_out>;
                                        };
                                };
                                port@1 {
                                        reg = <0>;
-                                       etf_in: endpoint {
+                                       etf_out: endpoint {
                                                remote-endpoint = <&replicator_in>;
                                        };
                                };
index 9b4dc41703e38036283aa2a4eededd3322e7a428..ae3b5adf32dfe4a31125880e3a8fa49877c83923 100644 (file)
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD11";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index fe6608ea327772e3ad0125c020f8d0102dda35bb..7919233c9ce27e3c86dc8dffce13a97e00112c64 100644 (file)
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD20";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index 3cfa8ca267384615694e693ed0371df694fea1f4..f9a186f6af8a9206de939bbdf3f6013988b8b994 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_R8A7796=y
@@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
 CONFIG_ARCH_SPRD=y
-CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_THUNDER2=y
 CONFIG_ARCH_UNIPHIER=y
@@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_HISI_STB=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCI_HOST_THUNDER_PEM=y
 CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
 CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_NUMA=y
@@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y
 CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
 CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_ARM_TEGRA186_CPUFREQ=y
-CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -236,11 +232,6 @@ CONFIG_SMSC911X=y
 CONFIG_SNI_AVE=y
 CONFIG_SNI_NETSEC=y
 CONFIG_STMMAC_ETH=m
-CONFIG_DWMAC_IPQ806X=m
-CONFIG_DWMAC_MESON=m
-CONFIG_DWMAC_ROCKCHIP=m
-CONFIG_DWMAC_SUNXI=m
-CONFIG_DWMAC_SUN8I=m
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_AT803X_PHY=m
 CONFIG_MARVELL_PHY=m
@@ -269,8 +260,8 @@ CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_INPUT_MISC=y
@@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=11
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
-CONFIG_I2C_HID=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PCA954x=y
@@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
 CONFIG_SPI_ARMADA_3700=y
-CONFIG_SPI_MESON_SPICC=m
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
-CONFIG_SPI_QUP=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QUP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
-CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_MSM8994=y
 CONFIG_PINCTRL_MSM8996=y
-CONFIG_PINCTRL_MT7622=y
 CONFIG_PINCTRL_QDF2XXX=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_MT7622=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_MB86S7X=y
 CONFIG_GPIO_PL061=y
@@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_GEN3_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_BRCMSTB_THERMAL=m
 CONFIG_EXYNOS_THERMAL=y
-CONFIG_RCAR_GEN3_THERMAL=y
-CONFIG_QCOM_TSENS=y
-CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_TSENS=y
 CONFIG_UNIPHIER_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
@@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI6421V530=y
 CONFIG_REGULATOR_HI655X=y
@@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_S2MPS11=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
 CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_RC_SUPPORT=y
-CONFIG_RC_CORE=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_DECODERS=y
-CONFIG_IR_MESON=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 # CONFIG_DVB_NET is not set
 CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y
 CONFIG_ROCKCHIP_DW_MIPI_DSI=y
 CONFIG_ROCKCHIP_INNO_HDMI=y
 CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=y
-CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_RCAR_LVDS=m
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
@@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_GENERIC=m
 CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_LP855X=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_AK4613=m
 CONFIG_SND_SIMPLE_CARD=m
 CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_I2C_HID=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
@@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
-CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
@@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_PWM=y
 CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_EDAC=y
 CONFIG_EDAC_GHES=y
 CONFIG_RTC_CLASS=y
@@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_ARMADA38X=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_DMADEVICES=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
@@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_ARM_MHU=y
 CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
-CONFIG_HI6220_MBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_SMMU=y
@@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
 CONFIG_MEMORY=y
-CONFIG_TEGRA_MC=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_ROCKCHIP_SARADC=m
@@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_TEGRA=m
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_HISTB_COMBPHY=y
 CONFIG_PHY_HISI_INNO_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB3=m
-CONFIG_PHY_HI6220_USB=y
-CONFIG_PHY_QCOM_USB_HS=y
-CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_MVEBU_CP110_COMPHY=y
 CONFIG_PHY_QCOM_QMP=m
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_ROCKCHIP_TYPEC=y
-CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_QCOM_L2_PMU=y
 CONFIG_QCOM_L3_PMU=y
-CONFIG_MESON_EFUSE=m
 CONFIG_QCOM_QFPROM=y
 CONFIG_ROCKCHIP_EFUSE=y
 CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
 CONFIG_TEE=y
 CONFIG_OPTEE=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_ACPI=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_ACPI_APEI_MEMORY_FAILURE=y
 CONFIG_ACPI_APEI_EINJ=y
 CONFIG_EXT2_FS=y
@@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
@@ -691,20 +672,15 @@ CONFIG_SECURITY=y
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
 CONFIG_CRYPTO_CRC32_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
 CONFIG_CRYPTO_AES_ARM64_BS=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
index 88f5aef7934c77a5213fdc85a7f8fe435fc76e70..e3a375c4cb83c383242ac6b9cc8b3247939e0947 100644 (file)
         *                           u32 *macp, u8 const rk[], u32 rounds);
         */
 ENTRY(ce_aes_ccm_auth_data)
-       frame_push      7
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-
-       ldr     w25, [x22]                      /* leftover from prev round? */
+       ldr     w8, [x3]                        /* leftover from prev round? */
        ld1     {v0.16b}, [x0]                  /* load mac */
-       cbz     w25, 1f
-       sub     w25, w25, #16
+       cbz     w8, 1f
+       sub     w8, w8, #16
        eor     v1.16b, v1.16b, v1.16b
-0:     ldrb    w7, [x20], #1                   /* get 1 byte of input */
-       subs    w21, w21, #1
-       add     w25, w25, #1
+0:     ldrb    w7, [x1], #1                    /* get 1 byte of input */
+       subs    w2, w2, #1
+       add     w8, w8, #1
        ins     v1.b[0], w7
        ext     v1.16b, v1.16b, v1.16b, #1      /* rotate in the input bytes */
        beq     8f                              /* out of input? */
-       cbnz    w25, 0b
+       cbnz    w8, 0b
        eor     v0.16b, v0.16b, v1.16b
-1:     ld1     {v3.4s}, [x23]                  /* load first round key */
-       prfm    pldl1strm, [x20]
-       cmp     w24, #12                        /* which key size? */
-       add     x6, x23, #16
-       sub     w7, w24, #2                     /* modified # of rounds */
+1:     ld1     {v3.4s}, [x4]                   /* load first round key */
+       prfm    pldl1strm, [x1]
+       cmp     w5, #12                         /* which key size? */
+       add     x6, x4, #16
+       sub     w7, w5, #2                      /* modified # of rounds */
        bmi     2f
        bne     5f
        mov     v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
        ld1     {v5.4s}, [x6], #16              /* load next round key */
        bpl     3b
        aese    v0.16b, v4.16b
-       subs    w21, w21, #16                   /* last data? */
+       subs    w2, w2, #16                     /* last data? */
        eor     v0.16b, v0.16b, v5.16b          /* final round */
        bmi     6f
-       ld1     {v1.16b}, [x20], #16            /* load next input block */
+       ld1     {v1.16b}, [x1], #16             /* load next input block */
        eor     v0.16b, v0.16b, v1.16b          /* xor with mac */
-       beq     6f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x19]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x19]                 /* reload mac */
-       endif_yield_neon
-
-       b       1b
-6:     st1     {v0.16b}, [x19]                 /* store mac */
+       bne     1b
+6:     st1     {v0.16b}, [x0]                  /* store mac */
        beq     10f
-       adds    w21, w21, #16
+       adds    w2, w2, #16
        beq     10f
-       mov     w25, w21
-7:     ldrb    w7, [x20], #1
+       mov     w8, w2
+7:     ldrb    w7, [x1], #1
        umov    w6, v0.b[0]
        eor     w6, w6, w7
-       strb    w6, [x19], #1
-       subs    w21, w21, #1
+       strb    w6, [x0], #1
+       subs    w2, w2, #1
        beq     10f
        ext     v0.16b, v0.16b, v0.16b, #1      /* rotate out the mac bytes */
        b       7b
-8:     mov     w7, w25
-       add     w25, w25, #16
+8:     mov     w7, w8
+       add     w8, w8, #16
 9:     ext     v1.16b, v1.16b, v1.16b, #1
        adds    w7, w7, #1
        bne     9b
        eor     v0.16b, v0.16b, v1.16b
-       st1     {v0.16b}, [x19]
-10:    str     w25, [x22]
-
-       frame_pop
+       st1     {v0.16b}, [x0]
+10:    str     w8, [x3]
        ret
 ENDPROC(ce_aes_ccm_auth_data)
 
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
 ENDPROC(ce_aes_ccm_final)
 
        .macro  aes_ccm_do_crypt,enc
-       frame_push      8
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-       mov     x25, x6
-
-       ldr     x26, [x25, #8]                  /* load lower ctr */
-       ld1     {v0.16b}, [x24]                 /* load mac */
-CPU_LE(        rev     x26, x26                )       /* keep swabbed ctr in reg */
+       ldr     x8, [x6, #8]                    /* load lower ctr */
+       ld1     {v0.16b}, [x5]                  /* load mac */
+CPU_LE(        rev     x8, x8                  )       /* keep swabbed ctr in reg */
 0:     /* outer loop */
-       ld1     {v1.8b}, [x25]                  /* load upper ctr */
-       prfm    pldl1strm, [x20]
-       add     x26, x26, #1
-       rev     x9, x26
-       cmp     w23, #12                        /* which key size? */
-       sub     w7, w23, #2                     /* get modified # of rounds */
+       ld1     {v1.8b}, [x6]                   /* load upper ctr */
+       prfm    pldl1strm, [x1]
+       add     x8, x8, #1
+       rev     x9, x8
+       cmp     w4, #12                         /* which key size? */
+       sub     w7, w4, #2                      /* get modified # of rounds */
        ins     v1.d[1], x9                     /* no carry in lower ctr */
-       ld1     {v3.4s}, [x22]                  /* load first round key */
-       add     x10, x22, #16
+       ld1     {v3.4s}, [x3]                   /* load first round key */
+       add     x10, x3, #16
        bmi     1f
        bne     4f
        mov     v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE(     rev     x26, x26                )       /* keep swabbed ctr in reg */
        bpl     2b
        aese    v0.16b, v4.16b
        aese    v1.16b, v4.16b
-       subs    w21, w21, #16
-       bmi     7f                              /* partial block? */
-       ld1     {v2.16b}, [x20], #16            /* load next input block */
+       subs    w2, w2, #16
+       bmi     6f                              /* partial block? */
+       ld1     {v2.16b}, [x1], #16             /* load next input block */
        .if     \enc == 1
        eor     v2.16b, v2.16b, v5.16b          /* final round enc+mac */
        eor     v1.16b, v1.16b, v2.16b          /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE(   rev     x26, x26                )       /* keep swabbed ctr in reg */
        eor     v1.16b, v2.16b, v5.16b          /* final round enc */
        .endif
        eor     v0.16b, v0.16b, v2.16b          /* xor mac with pt ^ rk[last] */
-       st1     {v1.16b}, [x19], #16            /* write output block */
-       beq     5f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x24]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x24]                 /* reload mac */
-       endif_yield_neon
-
-       b       0b
-5:
-CPU_LE(        rev     x26, x26                        )
-       st1     {v0.16b}, [x24]                 /* store mac */
-       str     x26, [x25, #8]                  /* store lsb end of ctr (BE) */
-
-6:     frame_pop
-       ret
-
-7:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
+       st1     {v1.16b}, [x0], #16             /* write output block */
+       bne     0b
+CPU_LE(        rev     x8, x8                  )
+       st1     {v0.16b}, [x5]                  /* store mac */
+       str     x8, [x6, #8]                    /* store lsb end of ctr (BE) */
+5:     ret
+
+6:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
        eor     v1.16b, v1.16b, v5.16b          /* final round enc */
-       st1     {v0.16b}, [x24]                 /* store mac */
-       add     w21, w21, #16                   /* process partial tail block */
-8:     ldrb    w9, [x20], #1                   /* get 1 byte of input */
+       st1     {v0.16b}, [x5]                  /* store mac */
+       add     w2, w2, #16                     /* process partial tail block */
+7:     ldrb    w9, [x1], #1                    /* get 1 byte of input */
        umov    w6, v1.b[0]                     /* get top crypted ctr byte */
        umov    w7, v0.b[0]                     /* get top mac byte */
        .if     \enc == 1
@@ -237,13 +197,13 @@ CPU_LE(   rev     x26, x26                        )
        eor     w9, w9, w6
        eor     w7, w7, w9
        .endif
-       strb    w9, [x19], #1                   /* store out byte */
-       strb    w7, [x24], #1                   /* store mac byte */
-       subs    w21, w21, #1
-       beq     6b
+       strb    w9, [x0], #1                    /* store out byte */
+       strb    w7, [x5], #1                    /* store mac byte */
+       subs    w2, w2, #1
+       beq     5b
        ext     v0.16b, v0.16b, v0.16b, #1      /* shift out mac byte */
        ext     v1.16b, v1.16b, v1.16b, #1      /* shift out ctr byte */
-       b       8b
+       b       7b
        .endm
 
        /*
index 253188fb8cb0cea0e35d0f4ed77b5e2c6332d507..e3e50950a863675b72a3c1e0d605d81cf5f258f2 100644 (file)
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
                kernel_neon_begin();
                aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
                kernel_neon_end();
+               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
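
In the hunk above, kernel_neon_end() now runs before skcipher_walk_done(), keeping the walk bookkeeping (which may copy and release buffers) outside the region where the NEON unit is claimed and preemption is disabled. A minimal sketch of the resulting loop shape, reusing the names visible in the hunk:

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		kernel_neon_begin();
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
		kernel_neon_end();	/* release NEON before bookkeeping */
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
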
index dcffb9e77589cd843bb04691a46d117013f12ba1..c723647b37db0387f58d3ea88f899147fdbc2727 100644 (file)
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
        .endm
 
        .macro          pmull_gcm_do_crypt, enc
-       frame_push      10
+       ld1             {SHASH.2d}, [x4]
+       ld1             {XL.2d}, [x1]
+       ldr             x8, [x5, #8]                    // load lower counter
 
-       mov             x19, x0
-       mov             x20, x1
-       mov             x21, x2
-       mov             x22, x3
-       mov             x23, x4
-       mov             x24, x5
-       mov             x25, x6
-       mov             x26, x7
-       .if             \enc == 1
-       ldr             x27, [sp, #96]                  // first stacked arg
-       .endif
-
-       ldr             x28, [x24, #8]                  // load lower counter
-CPU_LE(        rev             x28, x28        )
-
-0:     mov             x0, x25
-       load_round_keys w26, x0
-       ld1             {SHASH.2d}, [x23]
-       ld1             {XL.2d}, [x20]
+       load_round_keys w7, x6
 
        movi            MASK.16b, #0xe1
        ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE(        rev             x8, x8          )
        shl             MASK.2d, MASK.2d, #57
        eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        .if             \enc == 1
-       ld1             {KS.16b}, [x27]
+       ldr             x10, [sp]
+       ld1             {KS.16b}, [x10]
        .endif
 
-1:     ld1             {CTR.8b}, [x24]                 // load upper counter
-       ld1             {INP.16b}, [x22], #16
-       rev             x9, x28
-       add             x28, x28, #1
-       sub             w19, w19, #1
+0:     ld1             {CTR.8b}, [x5]                  // load upper counter
+       ld1             {INP.16b}, [x3], #16
+       rev             x9, x8
+       add             x8, x8, #1
+       sub             w0, w0, #1
        ins             CTR.d[1], x9                    // set lower counter
 
        .if             \enc == 1
        eor             INP.16b, INP.16b, KS.16b        // encrypt input
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
        rev64           T1.16b, INP.16b
 
-       cmp             w26, #12
-       b.ge            4f                              // AES-192/256?
+       cmp             w7, #12
+       b.ge            2f                              // AES-192/256?
 
-2:     enc_round       CTR, v21
+1:     enc_round       CTR, v21
 
        ext             T2.16b, XL.16b, XL.16b, #8
        ext             IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE(   rev             x28, x28        )
 
        .if             \enc == 0
        eor             INP.16b, INP.16b, KS.16b
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
-       cbz             w19, 3f
+       cbnz            w0, 0b
 
-       if_will_cond_yield_neon
-       st1             {XL.2d}, [x20]
-       .if             \enc == 1
-       st1             {KS.16b}, [x27]
-       .endif
-       do_cond_yield_neon
-       b               0b
-       endif_yield_neon
+CPU_LE(        rev             x8, x8          )
+       st1             {XL.2d}, [x1]
+       str             x8, [x5, #8]                    // store lower counter
 
-       b               1b
-
-3:     st1             {XL.2d}, [x20]
        .if             \enc == 1
-       st1             {KS.16b}, [x27]
+       st1             {KS.16b}, [x10]
        .endif
 
-CPU_LE(        rev             x28, x28        )
-       str             x28, [x24, #8]                  // store lower counter
-
-       frame_pop
        ret
 
-4:     b.eq            5f                              // AES-192?
+2:     b.eq            3f                              // AES-192?
        enc_round       CTR, v17
        enc_round       CTR, v18
-5:     enc_round       CTR, v19
+3:     enc_round       CTR, v19
        enc_round       CTR, v20
-       b               2b
+       b               1b
        .endm
 
        /*
index 7cf0b1aa6ea80bf6f87d45b85a349fb7a9e677e5..8a10f1d7199aed6ab88ded8d29ad5513df26a2cb 100644 (file)
@@ -488,9 +488,13 @@ static int gcm_decrypt(struct aead_request *req)
                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }
-               if (walk.nbytes)
-                       pmull_gcm_encrypt_block(iv, iv, NULL,
+               if (walk.nbytes) {
+                       kernel_neon_begin();
+                       pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
                                                num_rounds(&ctx->aes_key));
+                       kernel_neon_end();
+               }
+
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
                                    num_rounds(&ctx->aes_key));
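
The change above wraps the tail-block call in kernel_neon_begin()/kernel_neon_end() and passes the real round keys rather than NULL, presumably because, with the conditional-yield support gone from the asm earlier in this diff, the helper can no longer rely on round keys left loaded in NEON registers by an earlier call. The guarded pattern, as a general sketch:

	/* any call into NEON asm must own the unit for its duration */
	kernel_neon_begin();
	pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
				num_rounds(&ctx->aes_key));
	kernel_neon_end();
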
index a91933b1e2e62ba235ef05ddf8f9d34dbb6bcf49..4b650ec1d7dd1aa8d4418b6b896f81de4a2187ab 100644 (file)
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
                                 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
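
apply_alternatives() becomes apply_alternatives_module(), exposed only under CONFIG_MODULES with an empty static inline otherwise, so call sites need no #ifdef of their own. A hypothetical caller, to show the idiom:

	static void patch_loaded_section(void *start, size_t length)
	{
		/* compiles away entirely when CONFIG_MODULES=n */
		apply_alternatives_module(start, length);
	}
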
 
 #define ALTINSTR_ENTRY(feature,cb)                                           \
        " .word 661b - .\n"                             /* label           */ \
index c0235e0ff8493fc516109d1e2e5cbaa647fd27dc..9bca54dda75c60e9fa24c13ae8bda4b8c9f38f1e 100644 (file)
 
 #include <asm/cmpxchg.h>
 
-#define ___atomic_add_unless(v, a, u, sfx)                             \
-({                                                                     \
-       typeof((v)->counter) c, old;                                    \
-                                                                       \
-       c = atomic##sfx##_read(v);                                      \
-       while (c != (u) &&                                              \
-             (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)      \
-               c = old;                                                \
-       c;                                                              \
- })
-
 #define ATOMIC_INIT(i) { (i) }
 
 #define atomic_read(v)                 READ_ONCE((v)->counter)
 #define atomic_add_return_release      atomic_add_return_release
 #define atomic_add_return              atomic_add_return
 
-#define atomic_inc_return_relaxed(v)   atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v)   atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v)   atomic_add_return_release(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-
 #define atomic_sub_return_relaxed      atomic_sub_return_relaxed
 #define atomic_sub_return_acquire      atomic_sub_return_acquire
 #define atomic_sub_return_release      atomic_sub_return_release
 #define atomic_sub_return              atomic_sub_return
 
-#define atomic_dec_return_relaxed(v)   atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v)   atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v)   atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-
 #define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
 #define atomic_fetch_add_acquire       atomic_fetch_add_acquire
 #define atomic_fetch_add_release       atomic_fetch_add_release
        cmpxchg_release(&((v)->counter), (old), (new))
 #define atomic_cmpxchg(v, old, new)    cmpxchg(&((v)->counter), (old), (new))
 
-#define atomic_inc(v)                  atomic_add(1, (v))
-#define atomic_dec(v)                  atomic_sub(1, (v))
-#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v)      (atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u)   ___atomic_add_unless(v, a, u,)
 #define atomic_andnot                  atomic_andnot
 
 /*
 #define atomic64_add_return_release    atomic64_add_return_release
 #define atomic64_add_return            atomic64_add_return
 
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
-
 #define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
 #define atomic64_sub_return_acquire    atomic64_sub_return_acquire
 #define atomic64_sub_return_release    atomic64_sub_return_release
 #define atomic64_sub_return            atomic64_sub_return
 
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
-
 #define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
 #define atomic64_fetch_add_acquire     atomic64_fetch_add_acquire
 #define atomic64_fetch_add_release     atomic64_fetch_add_release
 #define atomic64_cmpxchg_release       atomic_cmpxchg_release
 #define atomic64_cmpxchg               atomic_cmpxchg
 
-#define atomic64_inc(v)                        atomic64_add(1, (v))
-#define atomic64_dec(v)                        atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v)       (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v)    (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v)    (atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u)   (___atomic_add_unless(v, a, u, 64) != u)
 #define atomic64_andnot                        atomic64_andnot
 
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive       atomic64_dec_if_positive
 
 #endif
 #endif
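
Everything deleted from this header (the inc/dec shorthands, the *_and_test() helpers and the local ___atomic_add_unless() loop) is now supplied by the generic atomic headers; only genuinely arch-specific ops such as atomic64_dec_if_positive remain. The removed cmpxchg loop lives on in generic form roughly as follows (a sketch of the include/linux/atomic.h fallback, not arm64 code):

	static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		do {
			if (unlikely(c == u))
				break;
		} while (!atomic_try_cmpxchg(v, &c, c + a));

		return c;
	}
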
index 9c19594ce7cb9932ede27894fb81b3467a43aa58..10d536b1af74c6f8a19d3742978e470064b86334 100644 (file)
 #define __ASM_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
 
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
 #include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/builtin-ffs.h>
 #include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p)       test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p)     test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* __ASM_BITOPS_H */
index 192d791f11036747704fedbc0839b37a4c1e49fc..7ed320895d1f463d1e95cd9ec6328a49eed765ae 100644 (file)
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (true)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
index 41770766d9648f1dd03568f5d2adc86f95f77e3a..6a53e59ced95484ba12e3140ee5dfd0786bb3132 100644 (file)
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
 
 struct task_struct;
 struct notifier_block;
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                                  int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index a0fee6985e6a75b621644ea4e7d19f8b4f220643..b2b0c6405eb082fea7c99e607e7f2c9d9cca4ee7 100644 (file)
@@ -8,8 +8,6 @@
 
 struct pt_regs;
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 static inline int nr_legacy_irqs(void)
 {
        return 0;
index 6deb8d726041eb9763efa84d4689ca24881d1e54..d5a44cf859e94284a1a38990105107fe3de28b2f 100644 (file)
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
        unsigned long saved_irqflag;
        struct prev_kprobe prev_kprobe;
        struct kprobe_step_ctx ss_ctx;
-       struct pt_regs jprobe_saved_regs;
 };
 
 void arch_remove_kprobe(struct kprobe *);
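
jprobe_saved_regs leaves struct kprobe_ctlblk as part of the tree-wide removal of the jprobes API; the matching break_handler path is dropped from kprobe_handler() near the end of this diff. Entry instrumentation now uses a plain kprobe pre-handler, roughly (illustrative, in the style of samples/kprobes):

	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("hit %pS\n", (void *)instruction_pointer(regs));
		return 0;	/* proceed with normal single-stepping */
	}

	static struct kprobe kp = {
		.symbol_name	= "do_sys_open",	/* example target */
		.pre_handler	= handler_pre,
	};
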
index fda9a8ca48bef71b0d4a76be1a45295af1211dd6..fe8777b12f8667c2c0b23952057fc13041276442 100644 (file)
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED           (1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST              (1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE      (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
 
index 9f82d6b53851e4b6bedbb28f6d0e7480acd622a6..1bdeca8918a684814f84ca3841b88a3123749cbb 100644 (file)
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte)) {
+       if (pte_valid_not_user(pte))
                dsb(ishst);
-               isb();
-       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
-       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        WRITE_ONCE(*pudp, pud);
        dsb(ishst);
-       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
index fa8b3fe932e6f568841017215524bc0b894cbf28..6495cc51246fc873bef97f99a2b0139f806516c1 100644 (file)
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
        /*
-        * The raw_cpu_read() is racy if called with preemption enabled.
-        * This is not a bug: kernel_neon_busy is only set when
-        * preemption is disabled, so we cannot migrate to another CPU
-        * while it is set, nor can we migrate to a CPU where it is set.
-        * So, if we find it clear on some CPU then we're guaranteed to
-        * find it clear on any CPU we could migrate to.
-        *
-        * If we are in between kernel_neon_begin()...kernel_neon_end(),
-        * the flag will be set, but preemption is also disabled, so we
-        * can't migrate to another CPU and spuriously see it become
-        * false.
+        * kernel_neon_busy is only set while preemption is disabled,
+        * and is clear whenever preemption is enabled. Since
+        * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+        * cannot change under our feet -- if it's set we cannot be
+        * migrated, and if it's clear we cannot be migrated to a CPU
+        * where it is set.
         */
        return !in_irq() && !irqs_disabled() && !in_nmi() &&
-               !raw_cpu_read(kernel_neon_busy);
+               !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
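
The rewritten comment pins down why this_cpu_read() is safe here even when preemption is enabled. For reference, the caller pattern the check is designed for (a sketch; simd_fn and scalar_fn are placeholders):

	if (may_use_simd()) {
		kernel_neon_begin();
		simd_fn(dst, src, len);		/* NEON-accelerated path */
		kernel_neon_end();
	} else {
		scalar_fn(dst, src, len);	/* context-safe fallback */
	}
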
index 6171178075dcab62def613141732a0b7601b1c43..a8f84812c6e8925c9429451dc3119bfbd5620e8c 100644 (file)
@@ -728,6 +728,17 @@ asm(
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {                      \
+       u64 __scs_val = read_sysreg(sysreg);                            \
+       u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);            \
+       if (__scs_new != __scs_val)                                     \
+               write_sysreg(__scs_new, sysreg);                        \
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
        u32 val;
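
sysreg_clear_set() folds the usual read/clear/set/write sequence into one macro and skips the write entirely when the value is unchanged. A hedged usage example (the register and bit names are illustrative arm64 constants):

	/* disable alignment checking, enable the I-cache, in one RMW */
	sysreg_clear_set(sctlr_el1, SCTLR_ELx_A, SCTLR_ELx_I);
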
index ffdaea7954bb620daf19aba8b855d4c04b1a33c1..0ad1cf233470d5abd212f05eea0f0ba832314012 100644 (file)
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+       struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 
        /*
         * The ASID allocator will either invalidate the ASID or mark
index 5c4bce4ac381a4ab87107e4aa47a9b7beef7d891..36fb069fd049c7053f38b75b9916bba7cb630643 100644 (file)
@@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt,
        }
 }
 
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+       u64 cur, d_size, ctr_el0;
+
+       ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+                                                          CTR_DMINLINE_SHIFT);
+       cur = start & ~(d_size - 1);
+       do {
+               /*
+                * We must clean+invalidate to the PoC in order to avoid
+                * Cortex-A53 errata 826319, 827319, 824069 and 819472
+                * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+                */
+               asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+       } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module)
 {
        struct alt_instr *alt;
        struct alt_region *region = alt_region;
@@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
                pr_info_once("patching kernel code\n");
 
                origptr = ALT_ORIG_PTR(alt);
-               updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+               updptr = is_module ? origptr : lm_alias(origptr);
                nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
                if (alt->cpufeature < ARM64_CB_PATCH)
@@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 
                alt_cb(alt, origptr, updptr, nr_inst);
 
-               flush_icache_range((uintptr_t)origptr,
-                                  (uintptr_t)(origptr + nr_inst));
+               if (!is_module) {
+                       clean_dcache_range_nopatch((u64)origptr,
+                                                  (u64)(origptr + nr_inst));
+               }
+       }
+
+       /*
+        * The core module code takes care of cache maintenance in
+        * flush_module_icache().
+        */
+       if (!is_module) {
+               dsb(ish);
+               __flush_icache_all();
+               isb();
        }
 }
 
@@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused)
                isb();
        } else {
                BUG_ON(alternatives_applied);
-               __apply_alternatives(&region, true);
+               __apply_alternatives(&region, false);
                /* Barriers provided by the cache flushing */
                WRITE_ONCE(alternatives_applied, 1);
        }
@@ -192,12 +227,14 @@ void __init apply_alternatives_all(void)
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
-void apply_alternatives(void *start, size_t length)
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
 {
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };
 
-       __apply_alternatives(&region, false);
+       __apply_alternatives(&region, true);
 }
+#endif
index d2856b129097899d37ba3790056fc28eefc8409e..c6d80743f4eded6fda5e9fb1dadce9a62e69888c 100644 (file)
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
        __kpti_forced = enabled ? 1 : -1;
        return 0;
 }
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void update_cpu_capabilities(u16 scope_mask)
 {
-       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
        __update_cpu_capabilities(arm64_errata, scope_mask,
                                  "enabling workaround for");
+       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
 }
 
 static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void __init enable_cpu_capabilities(u16 scope_mask)
 {
-       __enable_cpu_capabilities(arm64_features, scope_mask);
        __enable_cpu_capabilities(arm64_errata, scope_mask);
+       __enable_cpu_capabilities(arm64_features, scope_mask);
 }
 
 /*
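
Two separate changes in this file: "kpti=" moves from __setup() to early_param() so it is parsed from parse_early_param(), early enough for the code that consults __kpti_forced, and errata workarounds are now detected and enabled before CPU features. The shape of an early_param handler, for reference (names are illustrative):

	static bool example_enabled __initdata;

	static int __init parse_example(char *str)
	{
		return strtobool(str, &example_enabled);
	}
	early_param("example", parse_example);
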
index 413dbe530da836d3dc1119830628454168921064..8c9644376326fe96f05645d298c1b4fcd383d652 100644 (file)
@@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len)
 /*
  * Check whether bp virtual address is in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->ctrl.len);
+       va = hw->address;
+       len = get_hbp_len(hw->ctrl.len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 /*
  * Construct an arch_hw_breakpoint from a perf_event.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+               hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->ctrl.type = ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_3:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_5:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
                break;
        case HW_BREAKPOINT_LEN_6:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
                break;
        case HW_BREAKPOINT_LEN_7:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
@@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp)
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
-       if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+       if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
-                       if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
-                           info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+                       if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+                           hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
-               } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+               } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
-                       info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+                       hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }
 
        /* Address */
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
-       if (arch_check_bp_in_kernelspace(bp))
-               info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+       if (arch_check_bp_in_kernelspace(hw))
+               hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
-               info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+               hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
 
        /* Enabled? */
-       info->ctrl.enabled = !bp->attr.disabled;
+       hw->ctrl.enabled = !attr->disabled;
 
        return 0;
 }
@@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings.
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;
 
        /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
@@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         * that here.
         */
        if (is_compat_bp(bp)) {
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
-               offset = info->address & alignment_mask;
+               offset = hw->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                        /* Allow single byte watchpoint. */
-                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
-                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
                default:
                        return -EINVAL;
                }
        } else {
-               if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+               if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
-               offset = info->address & alignment_mask;
+               offset = hw->address & alignment_mask;
        }
 
-       info->address &= ~alignment_mask;
-       info->ctrl.len <<= offset;
+       hw->address &= ~alignment_mask;
+       hw->ctrl.len <<= offset;
 
        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
-       if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+       if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;
 
        return 0;
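
A note on the address/length fixup above: the ARM_BREAKPOINT_LEN_* values are byte-select (BAS) masks within the watchpoint's aligned word, so shifting the mask left by the misalignment offset picks out the watched byte lanes once the address has been rounded down. A minimal sketch of that encoding, assuming the usual BAS mask values (the helper is illustrative, not kernel code):

/*
 * Sketch: byte-address-select encoding behind the fixup above.
 * The length is a mask of byte lanes; shifting it by the
 * misalignment offset selects the watched bytes within the
 * aligned word. Constants assumed, helper illustrative.
 */
#define ARM_BREAKPOINT_LEN_1    0x1
#define ARM_BREAKPOINT_LEN_2    0x3

static unsigned int bas_for(unsigned long addr, unsigned int len_mask,
                            unsigned long alignment_mask)
{
        unsigned long offset = addr & alignment_mask;

        /* e.g. addr = 0x1003, LEN_1, mask 0x7 -> BAS = 0x1 << 3 = 0x08 */
        return len_mask << offset;
}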
index 60e5fc661f745b899ff6137b4c3f002de867a390..780a12f59a8f8c3426c3a4274e32ae9c3d829ab0 100644 (file)
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
index 155fd91e78f4a62180e7577355ca4a6b0eb283f4..f0f27aeefb73623a0983c1f3eec2054d306021dc 100644 (file)
@@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
-                       apply_alternatives((void *)s->sh_addr, s->sh_size);
-               }
+               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+                       apply_alternatives_module((void *)s->sh_addr, s->sh_size);
 #ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
index d849d9804011df6eabb143ed68da8d260dd5074d..e78c3ef04d95de696dfc87ce03ebdc90c258b4d1 100644 (file)
@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
-               pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+               pr_warn("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
                break;
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * pre-handler and it returned non-zero, it will
+                        * modify the execution path; there is no need to
+                        * single-step. Just reset the current kprobe and exit.
                         *
                         * pre_handler can hit a breakpoint and can step thru
                         * before return, keep PSTATE D-flag enabled until
@@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                setup_singlestep(p, regs, kcb, 0);
-                               return;
-                       }
-               }
-       } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
-           BRK64_OPCODE_KPROBES) && cur_kprobe) {
-               /* We probably hit a jprobe.  Call its break handler. */
-               if (cur_kprobe->break_handler  &&
-                    cur_kprobe->break_handler(cur_kprobe, regs)) {
-                       setup_singlestep(cur_kprobe, regs, kcb, 0);
-                       return;
+                       } else
+                               reset_current_kprobe();
                }
        }
        /*
@@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
        return DBG_HOOK_HANDLED;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       /*
-        * Since we can't be sure where in the stack frame "stacked"
-        * pass-by-value arguments are stored we just don't try to
-        * duplicate any of the stack. Do not use jprobes on functions that
-        * use more than 64 bytes (after padding each to an 8 byte boundary)
-        * of arguments, or pass individual arguments larger than 16 bytes.
-        */
-
-       instruction_pointer_set(regs, (unsigned long) jp->entry);
-       preempt_disable();
-       pause_graph_tracing();
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       /*
-        * Jprobe handler return by entering break exception,
-        * encoded same as kprobe, but with following conditions
-        * -a special PC to identify it from the other kprobes.
-        * -restore stack addr to original saved pt_regs
-        */
-       asm volatile("                          mov sp, %0      \n"
-                    "jprobe_return_break:      brk %1          \n"
-                    :
-                    : "r" (kcb->jprobe_saved_regs.sp),
-                      "I" (BRK64_ESR_KPROBES)
-                    : "memory");
-
-       unreachable();
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_addr = kcb->jprobe_saved_regs.sp;
-       long orig_sp = kernel_stack_pointer(regs);
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       extern const char jprobe_return_break[];
-
-       if (instruction_pointer(regs) != (u64) jprobe_return_break)
-               return 0;
-
-       if (orig_sp != stack_addr) {
-               struct pt_regs *saved_regs =
-                   (struct pt_regs *)kcb->jprobe_saved_regs.sp;
-               pr_err("current sp %lx does not match saved sp %lx\n",
-                      orig_sp, stack_addr);
-               pr_err("Saved registers for jprobe %p\n", jp);
-               __show_regs(saved_regs);
-               pr_err("Current registers\n");
-               __show_regs(regs);
-               BUG();
-       }
-       unpause_graph_tracing();
-       *regs = kcb->jprobe_saved_regs;
-       preempt_enable_no_resched();
-       return 1;
-}
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        if ((addr >= (unsigned long)__kprobes_text_start &&
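
With the jprobe paths removed above, a non-zero return from a pre-handler now means the handler has itself redirected execution (as the kretprobe trampoline does), so the arch code resets the current kprobe instead of single-stepping. A conventional handler returns 0. A minimal usage sketch, with an illustrative probe target:

#include <linux/kprobes.h>
#include <linux/printk.h>

/* Returning 0 lets the arch code single-step the probed insn. */
static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %s\n", p->symbol_name);
        return 0;
}

static struct kprobe demo_kp = {
        .symbol_name = "do_sys_open",   /* illustrative target */
        .pre_handler = demo_pre,
};

/* register_kprobe(&demo_kp) from module init; unregister_kprobe() on exit. */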
index f3e2e3aec0b0632793abc2ce06dbaa2addd97eb5..2faa9863d2e569e704191bd1939dac2eb111cb5b 100644 (file)
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry.  We're using this CPU's
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
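
The notrace annotation added above keeps secondary_start_kernel() out of function tracing, which could otherwise fire before the secondary CPU is fully initialized. As a rough sketch of what the keyword expands to (see <linux/compiler_types.h>; exact attribute spelling assumed):

#define notrace __attribute__((__no_instrument_function__))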
index dc6ecfa5a2d2564c90a5ce92003a0e3b8490cbce..aac7808ce2162a9d2bdcdcc938b649655663912e 100644 (file)
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
        BUG_ON(!current->mm);
 
-       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+                             KVM_ARM64_HOST_SVE_IN_USE |
+                             KVM_ARM64_HOST_SVE_ENABLED);
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
        if (test_thread_flag(TIF_SVE))
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+               vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-       local_bh_disable();
+       unsigned long flags;
 
-       update_thread_flag(TIF_SVE,
-                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+       local_irq_save(flags);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                /* Clean guest FP state to memory and invalidate cpu view */
                fpsimd_save();
                fpsimd_flush_cpu_state();
-       } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               /* Ensure user trap controls are correctly restored */
-               fpsimd_bind_task_to_cpu();
+       } else if (system_supports_sve()) {
+               /*
+                * The FPSIMD/SVE state in the CPU has not been touched, and we
+                * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+                * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+                * for EL0.  To avoid spurious traps, restore the trap state
+                * seen by kvm_arch_vcpu_load_fp():
+                */
+               if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+               else
+                       sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }
 
-       local_bh_enable();
+       update_thread_flag(TIF_SVE,
+                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+       local_irq_restore(flags);
 }
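
sysreg_clear_set(), used above to restore the EL0 SVE trap state, is a read-modify-write on a system register that skips the write when nothing changes. A sketch of its semantics, modelled on <asm/sysreg.h> (body assumed, not copied):

#define sysreg_clear_set(sysreg, clear, set) do {                      \
        u64 __scs_val = read_sysreg(sysreg);                           \
        u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);           \
                                                                       \
        if (__scs_new != __scs_val)                                    \
                write_sysreg(__scs_new, sysreg);                       \
} while (0)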
index 137710f4dac30ac01c5c17856b730a996628a2e3..68755fd70dcf4c4164cb1453fbe2695a1bc8ff33 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-lib-y          := bitops.o clear_user.o delay.o copy_from_user.o       \
+lib-y          := clear_user.o delay.o copy_from_user.o                \
                   copy_to_user.o copy_in_user.o copy_page.o            \
                   clear_page.o memchr.o memcpy.o memmove.o memset.o    \
                   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o       \
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
deleted file mode 100644 (file)
index 43ac736..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Based on arch/arm/lib/bitops.h
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/lse.h>
-
-/*
- * x0: bits 5:0  bit offset
- *     bits 31:6 word offset
- * x1: address
- */
-       .macro  bitop, name, llsc, lse
-ENTRY( \name   )
-       and     w3, w0, #63             // Get bit offset
-       eor     w0, w0, w3              // Clear low bits
-       mov     x2, #1
-       add     x1, x1, x0, lsr #3      // Get word offset
-alt_lse "      prfm    pstl1strm, [x1]",       "nop"
-       lsl     x3, x2, x3              // Create mask
-
-alt_lse        "1:     ldxr    x2, [x1]",              "\lse   x3, [x1]"
-alt_lse        "       \llsc   x2, x2, x3",            "nop"
-alt_lse        "       stxr    w0, x2, [x1]",          "nop"
-alt_lse        "       cbnz    w0, 1b",                "nop"
-
-       ret
-ENDPROC(\name  )
-       .endm
-
-       .macro  testop, name, llsc, lse
-ENTRY( \name   )
-       and     w3, w0, #63             // Get bit offset
-       eor     w0, w0, w3              // Clear low bits
-       mov     x2, #1
-       add     x1, x1, x0, lsr #3      // Get word offset
-alt_lse "      prfm    pstl1strm, [x1]",       "nop"
-       lsl     x4, x2, x3              // Create mask
-
-alt_lse        "1:     ldxr    x2, [x1]",              "\lse   x4, x2, [x1]"
-       lsr     x0, x2, x3
-alt_lse        "       \llsc   x2, x2, x4",            "nop"
-alt_lse        "       stlxr   w5, x2, [x1]",          "nop"
-alt_lse        "       cbnz    w5, 1b",                "nop"
-alt_lse        "       dmb     ish",                   "nop"
-
-       and     x0, x0, #1
-       ret
-ENDPROC(\name  )
-       .endm
-
-/*
- * Atomic bit operations.
- */
-       bitop   change_bit, eor, steor
-       bitop   clear_bit, bic, stclr
-       bitop   set_bit, orr, stset
-
-       testop  test_and_change_bit, eor, ldeoral
-       testop  test_and_clear_bit, bic, ldclral
-       testop  test_and_set_bit, orr, ldsetal
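
Deleting bitops.S switches arm64 to the generic bitops built on the atomic ops. The word-offset/bit-offset split the assembly performed ("bits 5:0 bit offset, bits 31:6 word offset") looks like this in C; a sketch using a compiler builtin rather than the kernel's atomic_long helpers:

static inline void sketch_set_bit(unsigned int nr, unsigned long *addr)
{
        unsigned long mask = 1UL << (nr & 63);   /* bits 5:0 -> bit lane */
        unsigned long *word = addr + (nr >> 6);  /* upper bits -> word   */

        /* The generic implementation does this with an atomic OR. */
        __atomic_fetch_or(word, mask, __ATOMIC_RELAXED);
}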
index 49e217ac7e1ec2087c440c60ec71126f0e48ec32..61e93f0b548228f57a08f25a14291a1e46437115 100644 (file)
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
-               if (!coherent)
-                       __dma_flush_area(page_to_virt(page), iosize);
-
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
-               if (!addr) {
+               if (addr) {
+                       memset(addr, 0, size);
+                       if (!coherent)
+                               __dma_flush_area(page_to_virt(page), iosize);
+               } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
index ecc6818191df961eac49e6ca0c7d8b8d38d0c855..192b3ba070755f70d41f13d3c68eaa18b2b7f17d 100644 (file)
@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
        pte_t orig_pte = huge_ptep_get(ptep);
        bool valid = pte_valid(orig_pte);
        unsigned long i, saddr = addr;
@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
                        orig_pte = pte_mkdirty(orig_pte);
        }
 
-       if (valid)
+       if (valid) {
+               struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
                flush_tlb_range(&vma, saddr, addr);
+       }
        return orig_pte;
 }
 
@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;
 
        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
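
TLB_FLUSH_VMA, adopted above and in the ia64 hunk further down, builds a minimal on-stack vma carrying only the fields flush_tlb_range() consumes. Consistent with its use at these call sites, the definition is presumably along the lines of:

#define TLB_FLUSH_VMA(mm, flags) { .vm_mm = (mm), .vm_flags = (flags) }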
index 325cfb3b858aa698a96b23433230503063375b17..9abf8a1e7b250c49b0064f6abc67d41eabdbc52c 100644 (file)
@@ -611,11 +611,13 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32                       > TASK_SIZE_64);
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /*
         * Make sure we chose the upper bound of sizeof(struct page)
-        * correctly.
+        * correctly when sizing the VMEMMAP array.
         */
        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
 
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
index 493ff75670ffd98a1dc344a133f0f31a634f93ff..8ae5d7ae4af344967f3cfeb711f2eade43dcf76a 100644 (file)
@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
        return 1;
 }
 
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
        return pud_none(*pud);
 }
 
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
        return pmd_none(*pmd);
 }
index 5f9a73a4452c2b87dd9a922933b12c85ab008377..03646e6a2ef4f240412d1eb62a1cbc27d04705b0 100644 (file)
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
-       dc      civac, cur_\()\type\()p         // is visible to all CPUs.
+       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
+       dmb     sy                              // that it is visible to all
+       dc      civac, cur_\()\type\()p         // CPUs.
        .endm
 
 /*
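
The added "dmb sy" orders the page-table entry store ahead of the DC CIVAC cache maintenance; without it, the clean could act before the store is visible. In C-with-barriers terms the sequence is roughly as follows (the cache-clean helper name is illustrative; dmb() is the <asm/barrier.h> macro):

static void sketch_publish_pte(u64 *entp, u64 ent)
{
        WRITE_ONCE(*entp, ent | PTE_NG); /* str: update the entry       */
        dmb(sy);                         /* order the store, then ...   */
        clean_dcache_line(entp);         /* ... dc civac (illustrative) */
}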
index 941e7554e886aea0339a19acc2823299cde3501f..c6b6a06231b2e2f9a5998385ceda0b2acc841e3e 100644 (file)
@@ -2,8 +2,10 @@
 #ifndef __ARCH_H8300_ATOMIC__
 #define __ARCH_H8300_ATOMIC__
 
+#include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/irqflags.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -15,8 +17,6 @@
 #define atomic_read(v)         READ_ONCE((v)->counter)
 #define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
-#include <linux/kernel.h>
-
 #define ATOMIC_OP_RETURN(op, c_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)     \
 {                                                              \
@@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-#define atomic_sub_and_test(i, v)      (atomic_sub_return(i, v) == 0)
-
-#define atomic_inc_return(v)           atomic_add_return(1, v)
-#define atomic_dec_return(v)           atomic_sub_return(1, v)
-
-#define atomic_inc(v)                  (void)atomic_inc_return(v)
-#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
-
-#define atomic_dec(v)                  (void)atomic_dec_return(v)
-#define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int ret;
        h8300flags flags;
@@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        arch_local_irq_restore(flags);
        return ret;
 }
+#define atomic_fetch_add_unless                atomic_fetch_add_unless
 
 #endif /* __ARCH_H8300_ATOMIC__ */
index fb3dfb2a667ee11626cf9c999e5b303fcc9b9c83..311b9894ccc8d85d6a8b24078485c9f2ff3a16c3 100644 (file)
@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer to value
  * @a: amount to add
  * @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
  *
  */
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int __oldval;
        register int tmp;
@@ -196,18 +196,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        );
        return __oldval;
 }
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #endif
index 2524fb60fbc28518ad7635092ac1bdd5d62114fd..206530d0751b51b568a5c4f657af7d09ea27d942 100644 (file)
@@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^)
        (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
-
-static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long c, old, dec;
-       c = atomic64_read(v);
-       for (;;) {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-               old = atomic64_cmpxchg((v), c, dec);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return dec;
-}
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
-       return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ long
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
-       return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) == 0)
-
 #define atomic_add(i,v)                        (void)atomic_add_return((i), (v))
 #define atomic_sub(i,v)                        (void)atomic_sub_return((i), (v))
-#define atomic_inc(v)                  atomic_add(1, (v))
-#define atomic_dec(v)                  atomic_sub(1, (v))
 
 #define atomic64_add(i,v)              (void)atomic64_add_return((i), (v))
 #define atomic64_sub(i,v)              (void)atomic64_sub_return((i), (v))
-#define atomic64_inc(v)                        atomic64_add(1, (v))
-#define atomic64_dec(v)                        atomic64_sub(1, (v))
 
 #endif /* _ASM_IA64_ATOMIC_H */
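
The cmpxchg loops deleted above are not lost: this series centralizes them, so ia64 now picks up a generic atomic_fetch_add_unless() and the derived inc/dec helpers. A sketch of that fallback, modelled directly on the code removed here:

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int c = atomic_read(v);

        for (;;) {
                int old;

                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }

        return c;
}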
index 0302b366478919700fe06d900c4363fa12bc2a06..580356a2eea6ce95ba5878749988cd2eb44b1d6e 100644 (file)
@@ -82,8 +82,6 @@ struct prev_kprobe {
 #define ARCH_PREV_KPROBE_SZ 2
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
-       struct pt_regs jprobe_saved_regs;
-       unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
        unsigned long *bsp;
        unsigned long cfm;
        atomic_t prev_kprobe_index;
index 44f0ac0df30823d49807d91c4234c6556d0a885b..516355a774bfe89b2dc8ce6413aa0f3a8e1e71c0 100644 (file)
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
                flush_tlb_all();
        } else {
                /*
-                * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
-                * vma pointer.
+                * flush_tlb_range() takes a vma instead of a mm pointer because
+                * some architectures want the vm_flags for ITLB/DTLB flush.
                 */
-               struct vm_area_struct vma;
+               struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 
-               vma.vm_mm = tlb->mm;
                /* flush the address range from the tlb: */
                flush_tlb_range(&vma, start, end);
                /* now flush the virt. page-table area mapping the address range: */
index 5d742bcb0018391cdb7fbc8bf153704e85fa9402..4ca110f0a94b7c9472dcb46ba62f0ea626a53c47 100644 (file)
@@ -14,7 +14,6 @@
  */
 #define __IA64_BREAK_KDB               0x80100
 #define __IA64_BREAK_KPROBE            0x81000 /* .. 0x81fff */
-#define __IA64_BREAK_JPROBE            0x82000
 
 /*
  * OS-specific break numbers:
index 498f3da3f225d2ed8479af540bbbc22786e2c55d..d0c0ccdd656a04bbcc20b8fa31b44756a552bc1e 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA)            += numa.o
 obj-$(CONFIG_PERFMON)          += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)     += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY)        += mca_recovery.o
-obj-$(CONFIG_KPROBES)          += kprobes.o jprobes.o
+obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
deleted file mode 100644 (file)
index f69389c..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Jprobe specific operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-May     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- *              <anil.s.keshavamurthy@intel.com> initial implementation
- *
- * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
- * probe to be inserted into the beginning of a function call.  The fundamental
- * difference between a jprobe and a kprobe is the jprobe handler is executed
- * in the same context as the target function, while the kprobe handlers
- * are executed in interrupt context.
- *
- * For jprobes we initially gain control by placing a break point in the
- * first instruction of the targeted function.  When we catch that specific
- * break, we:
- *        * set the return address to our jprobe_inst_return() function
- *        * jump to the jprobe handler function
- *
- * Since we fixed up the return address, the jprobe handler will return to our
- * jprobe_inst_return() function, giving us control again.  At this point we
- * are back in the parents frame marker, so we do yet another call to our
- * jprobe_break() function to fix up the frame marker as it would normally
- * exist in the target function.
- *
- * Our jprobe_return function then transfers control back to kprobes.c by
- * executing a break instruction using one of our reserved numbers.  When we
- * catch that break in kprobes.c, we continue like we do for a normal kprobe
- * by single stepping the emulated instruction, and then returning execution
- * to the correct location.
- */
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-
-       /*
-        * void jprobe_break(void)
-        */
-       .section .kprobes.text, "ax"
-ENTRY(jprobe_break)
-       break.m __IA64_BREAK_JPROBE
-END(jprobe_break)
-
-       /*
-        * void jprobe_inst_return(void)
-        */
-GLOBAL_ENTRY(jprobe_inst_return)
-       br.call.sptk.many b0=jprobe_break
-END(jprobe_inst_return)
-
-GLOBAL_ENTRY(invalidate_stacked_regs)
-       movl r16=invalidate_restore_cfm
-       ;;
-       mov b6=r16
-       ;;
-       br.ret.sptk.many b6
-       ;;
-invalidate_restore_cfm:
-       mov r16=ar.rsc
-       ;;
-       mov ar.rsc=r0
-       ;;
-       loadrs
-       ;;
-       mov ar.rsc=r16
-       ;;
-       br.cond.sptk.many rp
-END(invalidate_stacked_regs)
-
-GLOBAL_ENTRY(flush_register_stack)
-       // flush dirty regs to backing store (must be first in insn group)
-       flushrs
-       ;;
-       br.ret.sptk.many rp
-END(flush_register_stack)
-
index f5f3a5e6fcd19d78c5f694ff7bbcae2160d93a2f..aa41bd5cf9b771ecda27e50a8acf7c3f0cf500fb 100644 (file)
@@ -35,8 +35,6 @@
 #include <asm/sections.h>
 #include <asm/exception.h>
 
-extern void jprobe_inst_return(void);
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                         */
                        break;
        }
-
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
                        prepare_ss(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
-               } else if (args->err == __IA64_BREAK_JPROBE) {
-                       /*
-                        * jprobe instrumented function just completed
-                        */
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               goto ss_probe;
-                       }
                } else if (!is_ia64_break_inst(regs)) {
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit, no further
@@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
        set_current_kprobe(p, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /*
-                * Our pre-handler is specifically requesting that we just
-                * do a return.  This is used for both the jprobe pre-handler
-                * and the kretprobe trampoline
-                */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
 #if !defined(CONFIG_PREEMPT)
        if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
@@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        case DIE_BREAK:
                /* err is break number from ia64_bad_break() */
                if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
-                       || args->err == __IA64_BREAK_JPROBE
                        || args->err == 0)
                        if (pre_kprobes_handler(args))
                                ret = NOTIFY_STOP;
@@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry)
        return ((struct fnptr *)entry)->ip;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr = arch_deref_entry_point(jp->entry);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       struct param_bsp_cfm pa;
-       int bytes;
-
-       /*
-        * Callee owns the argument space and could overwrite it, eg
-        * tail call optimization. So to be absolutely safe
-        * we save the argument space before transferring the control
-        * to instrumented jprobe function which runs in
-        * the process context
-        */
-       pa.ip = regs->cr_iip;
-       unw_init_running(ia64_get_bsp_cfm, &pa);
-       bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
-                               - (char *)pa.bsp;
-       memcpy( kcb->jprobes_saved_stacked_regs,
-               pa.bsp,
-               bytes );
-       kcb->bsp = pa.bsp;
-       kcb->cfm = pa.cfm;
-
-       /* save architectural state */
-       kcb->jprobe_saved_regs = *regs;
-
-       /* after rfi, execute the jprobe instrumented function */
-       regs->cr_iip = addr & ~0xFULL;
-       ia64_psr(regs)->ri = addr & 0xf;
-       regs->r1 = ((struct fnptr *)(jp->entry))->gp;
-
-       /*
-        * fix the return address to our jprobe_inst_return() function
-        * in the jprobes.S file
-        */
-       regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
-
-       return 1;
-}
-
-/* ia64 does not need this */
-void __kprobes jprobe_return(void)
-{
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       int bytes;
-
-       /* restoring architectural state */
-       *regs = kcb->jprobe_saved_regs;
-
-       /* restoring the original argument space */
-       flush_register_stack();
-       bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
-                               - (char *)kcb->bsp;
-       memcpy( kcb->bsp,
-               kcb->jprobes_saved_stacked_regs,
-               bytes );
-       invalidate_stacked_regs();
-
-       preempt_enable_no_resched();
-       return 1;
-}
-
 static struct kprobe trampoline_p = {
        .pre_handler = trampoline_probe_handler
 };
index 3b38c717008ac1993e5b54aa28a8fa6342ab7350..46bff16618362308ef016a2125cbdc6419c7124f 100644 (file)
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
        }
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        /*
         * partially initialize the vma for the sampling buffer
         */
-       vma->vm_mm           = mm;
        vma->vm_file         = get_file(filp);
        vma->vm_flags        = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
        vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
@@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        return 0;
 
 error:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 error_kmem:
        pfm_rvfree(smpl_buf, size);
 
index 18278b448530d3ac9302754cf170e261401fd008..3b85c3ecac38d2ae0860bc0d5aedd79f1cc96a39 100644 (file)
@@ -114,10 +114,9 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(current->mm);
        if (vma) {
-               INIT_LIST_HEAD(&vma->anon_vma_chain);
-               vma->vm_mm = current->mm;
+               vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -125,7 +124,7 @@ ia64_init_addr_space (void)
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
-                       kmem_cache_free(vm_area_cachep, vma);
+                       vm_area_free(vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
@@ -133,10 +132,9 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+               vma = vm_area_alloc(current->mm);
                if (vma) {
-                       INIT_LIST_HEAD(&vma->anon_vma_chain);
-                       vma->vm_mm = current->mm;
+                       vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -144,7 +142,7 @@ ia64_init_addr_space (void)
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
-                               kmem_cache_free(vm_area_cachep, vma);
+                               vm_area_free(vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
@@ -277,7 +275,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
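
The conversions above lean on the new vma lifetime helpers: vm_area_alloc() allocates and runs vma_init(), vm_area_free() returns the vma to its cache, and vma_set_anonymous() marks a vma anonymous by clearing vm_ops. Their assumed shape, consistent with these call sites (the real definitions live in kernel/fork.c and <linux/mm.h>):

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
        static const struct vm_operations_struct dummy_vm_ops = {};

        vma->vm_mm = mm;
        vma->vm_ops = &dummy_vm_ops;     /* non-NULL marks a file/special vma */
        INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
        vma->vm_ops = NULL;              /* NULL vm_ops == anonymous vma */
}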
index 785612b576f7df4de1f6287426ce79a49aed8255..b29f93774d95590b1008815fbfc129afa826d4a7 100644 (file)
@@ -2,6 +2,7 @@
 config M68K
        bool
        default y
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
        select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
        select HAVE_IDE
@@ -24,6 +25,10 @@ config M68K
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
+       select DMA_NONCOHERENT_OPS if HAS_DMA
+       select HAVE_MEMBLOCK
+       select ARCH_DISCARD_MEMBLOCK
+       select NO_BOOTMEM
 
 config CPU_BIG_ENDIAN
        def_bool y
index b2a6bc63f8cd1aab4806113812bf4c3987bbd8f8..aef8d42e078ddffe50f9557702bd493810a0ef13 100644 (file)
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
 extern void dn_init_IRQ(void);
 extern u32 dn_gettimeoffset(void);
 extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
 extern void dn_dummy_reset(void);
 #ifdef CONFIG_HEARTBEAT
 static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
        arch_gettimeoffset   = dn_gettimeoffset;
        mach_max_dma_address = 0xffffffff;
        mach_hwclk           = dn_dummy_hwclk; /* */
-       mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
        mach_reset           = dn_dummy_reset;  /* */
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
 
 }
 
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
-       pr_info("set_clock_mmss\n");
-       return 0;
-}
-
 void dn_dummy_reset(void) {
 
   dn_serial_print("The end !\n");
index 565c6f06ab0b8193ef02f8028d6a8baf8ff00b06..bd96702a1ad0977334a31ea21d8c782c7ddf1f00 100644 (file)
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
 extern u32 atari_gettimeoffset(void);
 extern int atari_mste_hwclk (int, struct rtc_time *);
 extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
 
 /* ++roman: This is a more elaborate test for an SCC chip, since the plain
  * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
                ATARIHW_SET(TT_CLK);
                pr_cont(" TT_CLK");
                mach_hwclk = atari_tt_hwclk;
-               mach_set_clock_mmss = atari_tt_set_clock_mmss;
        }
        if (hwreg_present(&mste_rtc.sec_ones)) {
                ATARIHW_SET(MSTE_CLK);
                pr_cont(" MSTE_CLK");
                mach_hwclk = atari_mste_hwclk;
-               mach_set_clock_mmss = atari_mste_set_clock_mmss;
        }
        if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
            hwreg_write(&dma_wd.fdc_speed, 0)) {
index c549b48174ec8342620816d846ea7d492f36d0b1..9cca64286464c31ae9b961609f67c3afbbb25d73 100644 (file)
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
     return( 0 );
 }
 
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    struct MSTE_RTC val;
-    unsigned char rtc_minutes;
-
-    mste_read(&val);
-    rtc_minutes= val.min_ones + val.min_tens * 10;
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-    {
-        val.sec_ones = real_seconds % 10;
-        val.sec_tens = real_seconds / 10;
-        val.min_ones = real_minutes % 10;
-        val.min_tens = real_minutes / 10;
-        mste_write(&val);
-    }
-    else
-        return -1;
-    return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
-    int retval = 0;
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    unsigned char save_control, save_freq_select, rtc_minutes;
-
-    save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
-    RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
-    save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
-    rtc_minutes = RTC_READ (RTC_MINUTES);
-    if (!(save_control & RTC_DM_BINARY))
-       rtc_minutes = bcd2bin(rtc_minutes);
-
-    /* Since we're only adjusting minutes and seconds, don't interfere
-       with hour overflow.  This avoids messing with unknown time zones
-       but requires your RTC not to be off by more than 30 minutes.  */
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-        {
-            if (!(save_control & RTC_DM_BINARY))
-                {
-                   real_seconds = bin2bcd(real_seconds);
-                   real_minutes = bin2bcd(real_minutes);
-                }
-            RTC_WRITE (RTC_SECONDS, real_seconds);
-            RTC_WRITE (RTC_MINUTES, real_minutes);
-        }
-    else
-        retval = -1;
-
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
-    RTC_WRITE (RTC_CONTROL, save_control);
-    return retval;
-}
-
 /*
  * Local variables:
  *  c-indent-level: 4
index 2cfff47650407479cae907205df89ea039934a37..143ee9fa3893ecce16e13c87309354a6d5c6211e 100644 (file)
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
 extern void bvme6000_sched_init(irq_handler_t handler);
 extern u32 bvme6000_gettimeoffset(void);
 extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
 void bvme6000_set_vectors (void);
 
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
     mach_init_IRQ        = bvme6000_init_IRQ;
     arch_gettimeoffset   = bvme6000_gettimeoffset;
     mach_hwclk           = bvme6000_hwclk;
-    mach_set_clock_mmss         = bvme6000_set_clock_mmss;
     mach_reset          = bvme6000_reset;
     mach_get_model       = bvme6000_get_model;
 
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
 
        return 0;
 }
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-       unsigned char rtc_minutes, rtc_tenms;
-       volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
-       unsigned char msr = rtc->msr & 0xc0;
-       unsigned long flags;
-       volatile int i;
-
-       rtc->msr = 0;           /* Ensure clock accessible */
-       rtc_minutes = bcd2bin (rtc->bcd_min);
-
-       if ((rtc_minutes < real_minutes
-               ? real_minutes - rtc_minutes
-                       : rtc_minutes - real_minutes) < 30)
-       {
-               local_irq_save(flags);
-               rtc_tenms = rtc->bcd_tenms;
-               while (rtc_tenms == rtc->bcd_tenms)
-                       ;
-               for (i = 0; i < 1000; i++)
-                       ;
-               rtc->bcd_min = bin2bcd(real_minutes);
-               rtc->bcd_sec = bin2bcd(real_seconds);
-               local_irq_restore(flags);
-       }
-       else
-               retval = -1;
-
-       rtc->msr = msr;
-
-       return retval;
-}
-
index a874e54404d10c94243c8b47bdde41afd353cc4f..1d5483f6e457b0a0fe62d3722e1a0e3377063823 100644 (file)
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
 CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 8ce39e23aa4272440426a263c97833814d0a0122..52a0af127951f5f0ed11e9022822fe6992d1140c 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 346c4e75edf869b25d6910ee2ffe01b3ef334a60..b3103e51268a31759ae841a28559720a6f1aadac 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
 CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index fca9c7aa71a34e072420ca0482d34a4a7a850246..fb7d651a4cabe203b02e455f55d0dff0e19b1e55 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index f9eab174915c59bb5ab8da593b04574cbc765b9c..6b37f5537c3905c5e96b8d65a7653acc490abc65 100644
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
 CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b52e597899eb33f42b911541a0969eb70a61c29c..930cc2965a113c9bdd8ef4de54fc88a1ced4e6b9 100644
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 2a84eeec5b02af12e72cb0155c7c530a146427ff..e7dd2530012759c3e8ef328044fab541d4a53741 100644
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_ATARI_SCSI=y
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 476e69994340ebdf90a0ae6cd78dce6bf8a6d210..b383327fd77a9411922848696e9f299a0ba8909d 100644
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 1477cda9146e76927c6e42e038aab6b9da57642e..9783d3deb9e9d8525ba864ca9eeeae02ca6a46cb 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b3a543dc48a072ef31fba56f7f395fee25ea6014..a35d10ee10cb709b9e16b27f97d0f6144b9692b1 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index d543ed5dfa967b5608713e26e321a692926b2ac2..573bf922d44823b767db2208204d07907a9cce5b 100644
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
 CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index a67e54246023e5e4b73ca117df70334e409b9f2b..efb27a7fcc559d5bb778998330d90006a46f9b0b 100644
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 4d8d68c4e3ddd5193c9826680b60a3b009c7aa91..a4b8d3331a9e2c6c73e0de5120f62303e791854e 100644
@@ -1,6 +1,7 @@
 generic-y += barrier.h
 generic-y += compat.h
 generic-y += device.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
index e993e2860ee141ec711a5115ccbee577dad35b58..47228b0d4163f38002709bb76308d63917f7dca0 100644
@@ -126,11 +126,13 @@ static inline void atomic_inc(atomic_t *v)
 {
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
 }
+#define atomic_inc atomic_inc
 
 static inline void atomic_dec(atomic_t *v)
 {
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
 }
+#define atomic_dec atomic_dec
 
 static inline int atomic_dec_and_test(atomic_t *v)
 {
@@ -138,6 +140,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
 }
+#define atomic_dec_and_test atomic_dec_and_test
 
 static inline int atomic_dec_and_test_lt(atomic_t *v)
 {
@@ -155,6 +158,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
 }
+#define atomic_inc_and_test atomic_inc_and_test
 
 #ifdef CONFIG_RMW_INSNS
 
@@ -190,9 +194,6 @@ static inline int atomic_xchg(atomic_t *v, int new)
 
 #endif /* !CONFIG_RMW_INSNS */
 
-#define atomic_dec_return(v)   atomic_sub_return(1, (v))
-#define atomic_inc_return(v)   atomic_add_return(1, (v))
-
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
        char c;
@@ -201,6 +202,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
                             : ASM_DI (i));
        return c != 0;
 }
+#define atomic_sub_and_test atomic_sub_and_test
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
@@ -210,20 +212,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
                             : ASM_DI (i));
        return c != 0;
 }
-
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
+#define atomic_add_negative atomic_add_negative
 
 #endif /* __ARCH_M68K_ATOMIC __ */
index 93b47b1f6fb420a1d7d52bd096f611b6668eceb0..d979f38af751cfeaa8a83cfc7389b15088ef3e21 100644
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
  */
 #if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
        !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
 {
        __asm__ __volatile__ ("bitrev %0; ff1 %0"
                : "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
                : "dm" (x & -x));
        return 32 - cnt;
 }
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+       return ffs(x) - 1;
+}
 
 /*
  *     fls: find last bit set.
@@ -515,12 +519,16 @@ static inline int __fls(int x)
 
 #endif
 
+/* Simple test-and-set bit locks */
+#define test_and_set_bit_lock  test_and_set_bit
+#define clear_bit_unlock       clear_bit
+#define __clear_bit_unlock     clear_bit_unlock
+
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 #endif /* __KERNEL__ */
 
 #endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed..0000000
--- a/arch/m68k/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-        return &m68k_dma_ops;
-}
-
-#endif  /* _M68K_DMA_MAPPING_H */
index ca2849afb0877339e18866885b2e8f76401b6ccd..aabe6420ead2a5996d233a00af822d5dff0eac87 100644
@@ -1,6 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
 #if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
 #include <asm/io_no.h>
 #else
 #include <asm/io_mm.h>
 #endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
index fe485f4f5fac4d92a5bbc9ad34d4f217b8b8b416..782b78f8a04890b315685b0ea78cab16a2d219af 100644
  *    isa_readX(),isa_writeX()  are for ISA memory
  */
 
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
 
 #ifdef __KERNEL__
 
-#define ARCH_HAS_IOREMAP_WT
-
 #include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
 #define writew(val, addr)      out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
 
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port)          0xff
-#define inb_p(port)        0xff
-#define outb(val,port)     ((void)0)
-#define outb_p(val,port)   ((void)0)
-#define inw(port)          0xffff
-#define inw_p(port)        0xffff
-#define outw(val,port)     ((void)0)
-#define outw_p(val,port)   ((void)0)
-#define inl(port)          0xffffffffUL
-#define inl_p(port)        0xffffffffUL
-#define outl(val,port)     ((void)0)
-#define outl_p(val,port)   ((void)0)
-
-#define insb(port,buf,nr)  ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr)  ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr)  ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr)      in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr)      in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
 #define readl(addr)      in_le32(addr)
 #define writel(val,addr) out_le32((addr),(val))
 
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
 #define writew_relaxed(b, addr)        writew(b, addr)
 #define writel_relaxed(b, addr)        writel(b, addr)
 
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
index 83a0a6d449f44bdd5fa7396a836cba2339478108..0498192e1d983292e1ac3fe7ada7e154cdebbe98 100644
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define PCI_SPACE_LIMIT        PCI_IO_MASK
 #endif /* CONFIG_PCI */
 
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
 #include <asm/kmap.h>
 #include <asm/virtconvert.h>
-#include <asm-generic/io.h>
 
 #endif /* _M68KNOMMU_IO_H */
index 84b8333db8ad1987ae214dec025eb813d502f6c4..aac7f045f7f0aa8509cf5ef288f8030de8276bce 100644
@@ -4,6 +4,8 @@
 
 #ifdef CONFIG_MMU
 
+#define ARCH_HAS_IOREMAP_WT
+
 /* Values for nocacheflag and cmode */
 #define IOMAP_FULL_CACHING             0
 #define IOMAP_NOCACHE_SER              1
@@ -16,6 +18,7 @@
  */
 extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
                               int cacheflag);
+#define iounmap iounmap
 extern void iounmap(void __iomem *addr);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
 }
 
 #define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
                                       unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
 
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
 static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
                                              unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
+#define memset_io memset_io
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
                             int count)
 {
        __builtin_memset((void __force *) addr, val, count);
 }
 
+#define memcpy_fromio memcpy_fromio
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
                                 int count)
 {
        __builtin_memcpy(dst, (void __force *) src, count);
 }
 
+#define memcpy_toio memcpy_toio
 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                               int count)
 {
index 1605da48ebf2244556be005c1ca3b7e822eeb961..49bd3266b4b1b79ad9581f0b40d454b25228ffcd 100644
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
 extern unsigned int (*mach_get_ss)(void);
 extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
 extern void (*mach_reset)( void );
 extern void (*mach_halt)( void );
 extern void (*mach_power_off)( void );
index 9b840c03ebb7ab06592a23d89290abecb0baaadb..08cee11180e6998d2069b1591e78ad57288f9134 100644
@@ -57,7 +57,6 @@ struct mac_model
 #define MAC_SCSI_IIFX          5
 #define MAC_SCSI_DUO           6
 #define MAC_SCSI_LC            7
-#define MAC_SCSI_LATE          8
 
 #define MAC_IDE_NONE           0
 #define MAC_IDE_QUADRA         1
index 8b707c249026032ac8bef80c6f1666c105d9b50d..12fe700632f458ea632a18bb9cdccd6660efd241 100644
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
                                  unsigned long address)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
index e644c4daf540e97e30aaba6c2761364ea5f17f55..6bbe52025de3c5c68518371a7d41e23003b6ece1 100644
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
-#define __va(paddr)            ((void *)(paddr))
+#define __va(paddr)            ((void *)((unsigned long)(paddr)))
 
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
index 463572c4943f195b57c9a69e0100a405c3b441a6..e99993c57d6b6c0a7fc8bfeac7fdffb4d25c06c8 100644
@@ -6,7 +6,7 @@
 
 #undef DEBUG
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -19,7 +19,7 @@
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t flag, unsigned long attrs)
 {
        struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        return addr;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
                dma_addr_t handle, unsigned long attrs)
 {
        pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
 
 #include <asm/cacheflush.h>
 
-static void *m68k_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
        return ret;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-static void m68k_dma_sync_single_for_device(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+               size_t size, enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
        }
 }
 
-static void m68k_dma_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t handle = page_to_phys(page) + offset;
-
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_sync_single_for_device(dev, handle, size, dir);
-
-       return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               sg->dma_address = sg_phys(sg);
-
-               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-                       continue;
-
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-       return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
-       .alloc                  = m68k_dma_alloc,
-       .free                   = m68k_dma_free,
-       .map_page               = m68k_dma_map_page,
-       .map_sg                 = m68k_dma_map_sg,
-       .sync_single_for_device = m68k_dma_sync_single_for_device,
-       .sync_sg_for_device     = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
 void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
        if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
index f35e3ebd6331f846d4d523f2ea6b58f59cdb5641..5d3596c180f9f75efab10fa416199e554fba3794 100644
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
 /* machine dependent timer functions */
 int (*mach_hwclk) (int, struct rtc_time*);
 EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
 unsigned int (*mach_get_ss)(void);
 int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                        be32_to_cpu(m->addr);
                                m68k_memory[m68k_num_memory].size =
                                        be32_to_cpu(m->size);
+                               memblock_add(m68k_memory[m68k_num_memory].addr,
+                                            m68k_memory[m68k_num_memory].size);
                                m68k_num_memory++;
                        } else
                                pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifndef CONFIG_SUN3
-       int i;
-#endif
-
        /* The bootinfo is located right after the kernel */
        if (!CPU_IS_COLDFIRE)
                m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifndef CONFIG_SUN3
-       for (i = 1; i < m68k_num_memory; i++)
-               free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
-                                 m68k_memory[i].size);
 #ifdef CONFIG_BLK_DEV_INITRD
        if (m68k_ramdisk.size) {
-               reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
-                                    m68k_ramdisk.addr, m68k_ramdisk.size,
-                                    BOOTMEM_DEFAULT);
+               memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
                initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
                initrd_end = initrd_start + m68k_ramdisk.size;
                pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
index a98af10182016c4cc8426c68f05573d616712833..cfd5475bfc3152aa0c47246db970d8468e4fa944 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
 void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
 int (*mach_hwclk) (int, struct rtc_time*);
 
 /* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
 
 void __init setup_arch(char **cmdline_p)
 {
-       int bootmap_size;
-
        memory_start = PAGE_ALIGN(_ramstart);
        memory_end = _ramend;
 
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
+       memblock_add(memory_start, memory_end - memory_start);
+
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
        min_low_pfn = PFN_DOWN(memory_start);
        max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 
-       bootmap_size = init_bootmem_node(
-                       NODE_DATA(0),
-                       min_low_pfn,            /* map goes here */
-                       PFN_DOWN(PAGE_OFFSET),
-                       max_pfn);
-       /*
-        * Free the usable memory, we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(memory_start, memory_end - memory_start);
-       reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
 #if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
        if ((initrd_start > 0) && (initrd_start < initrd_end) &&
                        (initrd_end < memory_end))
-               reserve_bootmem(initrd_start, initrd_end - initrd_start,
-                                BOOTMEM_DEFAULT);
+               memblock_reserve(initrd_start, initrd_end - initrd_start);
 #endif /* if defined(CONFIG_BLK_DEV_INITRD) */
 
        /*
index e522307db47ccbd9d371239ea36e2c13488aec06..b02d7254b73a9fe3c68834f36839022548cd06ea 100644 (file)
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
 /* Mac specific timer functions */
 extern u32 mac_gettimeoffset(void);
 extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
 extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
        mach_get_model = mac_get_model;
        arch_gettimeoffset = mac_gettimeoffset;
        mach_hwclk = mac_hwclk;
-       mach_set_clock_mmss = mac_set_clock_mmss;
        mach_reset = mac_reset;
        mach_halt = mac_poweroff;
        mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "PowerBook 520",
                .adb_type       = MAC_ADB_PB2,
                .via_type       = MAC_VIA_QUADRA,
-               .scsi_type      = MAC_SCSI_LATE,
+               .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
        },
 };
 
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
-       {
-               .flags = IORESOURCE_IRQ,
-               .start = IRQ_MAC_SCSI,
-               .end   = IRQ_MAC_SCSI,
-       }, {
-               .flags = IORESOURCE_MEM,
-               .start = 0x50010000,
-               .end   = 0x50011FFF,
-       },
-};
-
 static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
        {
                .flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
                platform_device_register_simple("mac_scsi", 0,
                        mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
                break;
-       case MAC_SCSI_LATE:
-               /* XXX PDMA support for PowerBook 500 series needs testing */
-               platform_device_register_simple("mac_scsi", 0,
-                       mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
-               break;
        case MAC_SCSI_LC:
                /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
                 * Also from the Developer Notes for Classic II, LC III,
index c68054361615a32eef13ab46c17c99d75793baa0..19e9d8eef1f282c7c930be14dbd1d97b64f0df87 100644 (file)
 
 #include <asm/machdep.h>
 
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 
 #define RTC_OFFSET 2082844800
 
 static void (*rom_reset)(void);
 
 #ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
                return 0;
        while (!req.complete)
                cuda_poll();
 
-       time = (req.reply[3] << 24) | (req.reply[4] << 16) |
-              (req.reply[5] << 8) | req.reply[6];
+       time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+                    (req.reply[5] << 8) | req.reply[6]);
+
        return time - RTC_OFFSET;
 }
 
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                         (data >> 8) & 0xFF, data & 0xFF) < 0)
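The (u32) casts added to the read paths matter once the result is 64-bit:
the req.reply[] bytes are promoted to signed int, so a timestamp with the
top bit set (any present-day Mac time) would sign-extend when assigned to
a time64_t. A sketch of the effect, with an illustrative byte value:

static void sketch_sign_extension(void)
{
	u8 b = 0xF0;				/* high byte of a present-day Mac time */
	time64_t wrong = b << 24;		/* int math sign-extends: 0xfffffffff0000000 */
	time64_t right = (u32)(b << 24);	/* zero-extends: 0x00000000f0000000 */
}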
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
 #endif /* CONFIG_ADB_CUDA */
 
 #ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
                return 0;
        while (!req.complete)
                pmu_poll();
 
-       time = (req.reply[1] << 24) | (req.reply[2] << 16) |
-              (req.reply[3] << 8) | req.reply[4];
+       time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+                    (req.reply[3] << 8) | req.reply[4]);
+
        return time - RTC_OFFSET;
 }
 
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                        (data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
  * is basically any machine with Mac II-style ADB.
  */
 
-static long via_read_time(void)
+static time64_t via_read_time(void)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } result, last_result;
        int count = 1;
 
@@ -270,7 +276,7 @@ static long via_read_time(void)
                via_pram_command(0x8D, &result.cdata[0]);
 
                if (result.idata == last_result.idata)
-                       return result.idata - RTC_OFFSET;
+                       return (time64_t)result.idata - RTC_OFFSET;
 
                if (++count > 10)
                        break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
                last_result.idata = result.idata;
        }
 
-       pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
-              last_result.idata, result.idata);
+       pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+              __func__, last_result.idata, result.idata);
 
        return 0;
 }
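The loop above keeps the long-standing read-until-stable idiom: the clock
can tick between the four single-byte PRAM reads, so it retries until two
consecutive 32-bit samples agree. The idiom in isolation (a sketch; the
read callback stands in for the via_pram_command() sequence):

static u32 sketch_read_stable(u32 (*read_once)(void))
{
	u32 last, cur;
	int tries;

	last = read_once();
	for (tries = 0; tries < 10; tries++) {
		cur = read_once();
		if (cur == last)
			return cur;	/* two matching samples: stable */
		last = cur;
	}
	return 0;	/* unstable after 10 tries; give up like the driver */
}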
@@ -291,11 +297,11 @@ static long via_read_time(void)
  * is basically any machine with Mac II-style ADB.
  */
 
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } data;
        __u8 temp;
 
@@ -304,7 +310,7 @@ static void via_write_time(long time)
        temp = 0x55;
        via_pram_command(0x35, &temp);
 
-       data.idata = time + RTC_OFFSET;
+       data.idata = lower_32_bits(time + RTC_OFFSET);
        via_pram_command(0x01, &data.cdata[3]);
        via_pram_command(0x05, &data.cdata[2]);
        via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
  * This function translates seconds since 1970 into a proper date.
  *
  * Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly the same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
  */
 #define SECS_PER_MINUTE (60)
 #define SECS_PER_HOUR  (SECS_PER_MINUTE * 60)
 #define SECS_PER_DAY   (SECS_PER_HOUR * 24)
 
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
                     int *yearp, int *monp, int *dayp,
                     int *hourp, int *minp, int *secp)
 {
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
                /* Leap years.  */
                { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
        };
-       long int days, rem, y, wday, yday;
+       int days, rem, y, wday, yday;
        const unsigned short int *ip;
 
-       days = time / SECS_PER_DAY;
-       rem = time % SECS_PER_DAY;
+       days = div_u64_rem(time, SECS_PER_DAY, &rem);
        rem += offset;
        while (rem < 0) {
                rem += SECS_PER_DAY;
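div_u64_rem() replaces the '/' and '%' pair because 32-bit m68k has no
native 64-bit division; open-coding it on a time64_t would pull in libgcc
helpers. A sketch of the call with an illustrative timestamp:

static void sketch_div(void)
{
	time64_t t = 3 * 86400 + 12345;	/* 3 days plus change */
	u32 rem;
	u64 days = div_u64_rem(t, 86400, &rem);	/* days == 3, rem == 12345 */
}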
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
 
 int mac_hwclk(int op, struct rtc_time *t)
 {
-       unsigned long now;
+       time64_t now;
 
        if (!op) { /* read */
                switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
                         __func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                         t->tm_hour, t->tm_min, t->tm_sec);
 
-               now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                            t->tm_hour, t->tm_min, t->tm_sec);
+               now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+                              t->tm_hour, t->tm_min, t->tm_sec);
 
                switch (macintosh_config->adb_type) {
                case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
-       struct rtc_time now;
-
-       mac_hwclk(0, &now);
-       now.tm_sec = nowtime % 60;
-       now.tm_min = (nowtime / 60) % 60;
-       mac_hwclk(1, &now);
-
-       return 0;
-}
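With mac_set_clock_mmss() gone, the write path above builds its timestamp
with mktime64(), the 64-bit-safe sibling of the removed mktime() call. A
one-line sketch of the call (arguments illustrative):

static time64_t sketch_mktime(void)
{
	/* year, month (1-12), day, hour, minute, second */
	return mktime64(2018, 8, 14, 3, 54, 14);
}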
index 8827b7f914025595341b7f9daad3770f095de440..38e2b272c220cbfaee5fdd4032c4461ab45c5e2e 100644 (file)
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
                pg_data_table[i] = pg_data_map + node;
        }
 #endif
-       pg_data_map[node].bdata = bootmem_node_data + node;
        node_set_online(node);
 }
 
index 2925d795d71a2bff61796dd651ecbfe70ca6901f..70dde040779b56feaeaa650a7c342a23750a1fce 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 
 void __init cf_bootmem_alloc(void)
 {
-       unsigned long start_pfn;
        unsigned long memstart;
 
        /* _rambase and _ramend will be naturally page aligned */
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
+       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
 
        /* page numbers */
        memstart = PAGE_ALIGN(_ramstart);
        min_low_pfn = PFN_DOWN(_rambase);
-       start_pfn = PFN_DOWN(memstart);
        max_pfn = max_low_pfn = PFN_DOWN(_ramend);
        high_memory = (void *)_ramend;
 
+       /* Reserve kernel text/data/bss */
+       memblock_reserve(_rambase, memstart - _rambase);
+
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
 
-       /* setup bootmem data */
+       /* setup node data */
        m68k_setup_node(0);
-       memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
-               min_low_pfn, max_low_pfn);
-       free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
 }
 
 /*
index e490ecc7842c9db3e8c6ace9d6d11061ed49cad1..4e17ecb5928aae85155c9f6a5b0b0704a24b1100 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
-       unsigned long addr, size, end;
+       unsigned long addr;
        int i;
 
 #ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
 
-       for (i = 0; i < m68k_num_memory; i++) {
-               addr = m68k_memory[i].addr;
-               end = addr + m68k_memory[i].size;
-               m68k_setup_node(i);
-               availmem = PAGE_ALIGN(availmem);
-               availmem += init_bootmem_node(NODE_DATA(i),
-                                             availmem >> PAGE_SHIFT,
-                                             addr >> PAGE_SHIFT,
-                                             end >> PAGE_SHIFT);
-       }
+       /* Reserve kernel text/data/bss and the memory allocated in head.S */
+       memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
 
        /*
         * Map the physical memory available into the kernel virtual
-        * address space. First initialize the bootmem allocator with
-        * the memory we already mapped, so map_node() has something
-        * to allocate.
+        * address space. Make sure memblock will not try to allocate
+        * pages beyond the memory we already mapped in head.S
         */
-       addr = m68k_memory[0].addr;
-       size = m68k_memory[0].size;
-       free_bootmem_node(NODE_DATA(0), availmem,
-                         min(m68k_init_mapped_size, size) - (availmem - addr));
-       map_node(0);
-       if (size > m68k_init_mapped_size)
-               free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
-                                 size - m68k_init_mapped_size);
-
-       for (i = 1; i < m68k_num_memory; i++)
+       memblock_set_bottom_up(true);
+
+       for (i = 0; i < m68k_num_memory; i++) {
+               m68k_setup_node(i);
                map_node(i);
+       }
 
        flush_tlb_all();
 
index f8a710fd84cd68aea7467653890f1cd14dd45cf4..adea549d240e9eb555200c7ec7db49582373e2b7 100644 (file)
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
 extern void mvme147_sched_init(irq_handler_t handler);
 extern u32 mvme147_gettimeoffset(void);
 extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
 extern void mvme147_reset (void);
 
 
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
        mach_init_IRQ           = mvme147_init_IRQ;
        arch_gettimeoffset      = mvme147_gettimeoffset;
        mach_hwclk              = mvme147_hwclk;
-       mach_set_clock_mmss     = mvme147_set_clock_mmss;
        mach_reset              = mvme147_reset;
        mach_get_model          = mvme147_get_model;
 
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
index 4ffd9ef98de4db20cb6b151132f53b75e64e84d5..6ee36a5b528d80b9b519c8ea97775be38114704f 100644 (file)
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
 extern void mvme16x_sched_init(irq_handler_t handler);
 extern u32 mvme16x_gettimeoffset(void);
 extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
 extern void mvme16x_reset (void);
 
 int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
     mach_init_IRQ        = mvme16x_init_IRQ;
     arch_gettimeoffset   = mvme16x_gettimeoffset;
     mach_hwclk           = mvme16x_hwclk;
-    mach_set_clock_mmss         = mvme16x_set_clock_mmss;
     mach_reset          = mvme16x_reset;
     mach_get_model       = mvme16x_get_model;
     mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
-
index 71c0867ecf20f201a99950ad584378d2607a0383..96810d91da2bd9cf2318e0dda4b40c092c3ea7d7 100644 (file)
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
 static u32 q40_gettimeoffset(void);
 static int q40_hwclk(int, struct rtc_time *);
 static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
 static int q40_get_rtc_pll(struct rtc_pll_info *pll);
 static int q40_set_rtc_pll(struct rtc_pll_info *pll);
 
@@ -175,7 +174,6 @@ void __init config_q40(void)
        mach_get_ss = q40_get_ss;
        mach_get_rtc_pll = q40_get_rtc_pll;
        mach_set_rtc_pll = q40_set_rtc_pll;
-       mach_set_clock_mmss = q40_set_clock_mmss;
 
        mach_reset = q40_reset;
        mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
        return bcd2bin(Q40_RTC_SECS);
 }
 
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
-       int rtc_minutes;
-
-       rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
-       if ((rtc_minutes < real_minutes ?
-            real_minutes - rtc_minutes :
-            rtc_minutes - real_minutes) < 30) {
-               Q40_RTC_CTRL |= Q40_RTC_WRITE;
-               Q40_RTC_MINS = bin2bcd(real_minutes);
-               Q40_RTC_SECS = bin2bcd(real_seconds);
-               Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
-       } else
-               retval = -1;
-
-       return retval;
-}
-
-
 /* get and set PLL calibration of RTC clock */
 #define Q40_RTC_PLL_MASK ((1<<5)-1)
 #define Q40_RTC_PLL_SIGN (1<<5)
index 1d28d380e8cc10379b5f155adb1a91194145150a..79a2bb85790615c308ecea9b2c3fdf9807054cbb 100644 (file)
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
        availmem = memory_start;
 
        m68k_setup_node(0);
-       availmem += init_bootmem(start_page, num_pages);
-       availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
-       free_bootmem(__pa(availmem), memory_end - (availmem));
 }
 
 
index 331a3bb66297baa39404fbefa273663ebd1871fe..93a737c8d1a6448d5bb4dcb2d71c8d8b5241e0d7 100644 (file)
@@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
-config HEART_BEAT
-       bool "Heart beat function for kernel"
-       default n
-       help
-         This option turns on/off heart beat kernel functionality.
-         First GPIO node is taken.
-
 endmenu
index d5384f6f36f777d4487ea00f2908b39fe5a26403..ce9b7b7861569501c0339491338da38a2cdb0050 100644 (file)
@@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 
 extern char *klimit;
 
-void microblaze_heartbeat(void);
-void microblaze_setup_heartbeat(void);
-
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
 #   endif /* CONFIG_MMU */
 
-extern void of_platform_reset_gpio_probe(void);
-
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
index 9774e1d9507baebbd6efe9bcf67bb88bcd214d82..a62d09420a47b725cf67e12b99784a2259e24d2f 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         399
+#define __NR_syscalls         401
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index eb156f914793b29b558c9b48853af8d833f3d3d2..7a9f16a7641374855d4d8f9a7189792031f51185 100644 (file)
 #define __NR_pkey_alloc                396
 #define __NR_pkey_free         397
 #define __NR_statx             398
+#define __NR_io_pgetevents     399
+#define __NR_rseq              400
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 7e99cf6984a1eb5f51597dbd8857f6f370d28328..dd71637437f4f6b1ff307d385b8a1ff293959075 100644 (file)
@@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_timer.o = -pg
 CFLAGS_REMOVE_intc.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_heartbeat.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_process.o = -pg
 endif
@@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o irq.o \
-       platform.o process.o prom.o ptrace.o \
+       process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
 
-obj-$(CONFIG_HEART_BEAT)       += heartbeat.o
 obj-$(CONFIG_MODULES)          += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU)              += misc.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
deleted file mode 100644 (file)
index 2022130..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-static unsigned int base_addr;
-
-void microblaze_heartbeat(void)
-{
-       static unsigned int cnt, period, dist;
-
-       if (base_addr) {
-               if (cnt == 0 || cnt == dist)
-                       out_be32(base_addr, 1);
-               else if (cnt == 7 || cnt == dist + 7)
-                       out_be32(base_addr, 0);
-
-               if (++cnt > period) {
-                       cnt = 0;
-                       /*
-                        * The hyperbolic function below modifies the heartbeat
-                        * period length in dependency of the current (5min)
-                        * load. It goes through the points f(0)=126, f(1)=86,
-                        * f(5)=51, f(inf)->30.
-                        */
-                       period = ((672 << FSHIFT) / (5 * avenrun[0] +
-                                               (7 << FSHIFT))) + 30;
-                       dist = period / 4;
-               }
-       }
-}
-
-void microblaze_setup_heartbeat(void)
-{
-       struct device_node *gpio = NULL;
-       int *prop;
-       int j;
-       const char * const gpio_list[] = {
-               "xlnx,xps-gpio-1.00.a",
-               NULL
-       };
-
-       for (j = 0; gpio_list[j] != NULL; j++) {
-               gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
-               if (gpio)
-                       break;
-       }
-
-       if (gpio) {
-               base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
-               base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
-               pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
-
-               /* GPIO is configured as output */
-               prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
-               if (prop)
-                       out_be32(base_addr + 4, 0);
-       }
-}
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
deleted file mode 100644 (file)
index 2540d60..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
-       { .compatible = "simple-bus", },
-       { .compatible = "xlnx,compound", },
-       {}
-};
-
-static int __init microblaze_device_probe(void)
-{
-       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-       of_platform_reset_gpio_probe();
-       return 0;
-}
-device_initcall(microblaze_device_probe);
index bab4c8330ef4f3f165ad2992d9660776fb0e3c41..fcbe1daf631662f8d45a580126f58d2090d90023 100644 (file)
@@ -18,7 +18,7 @@
 static int handle; /* reset pin handle */
 static unsigned int reset_val;
 
-void of_platform_reset_gpio_probe(void)
+static int of_platform_reset_gpio_probe(void)
 {
        int ret;
        handle = of_get_named_gpio(of_find_node_by_path("/"),
@@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void)
        if (!gpio_is_valid(handle)) {
                pr_info("Skipping unavailable RESET gpio %d (%s)\n",
                                handle, "reset");
-               return;
+               return -ENODEV;
        }
 
        ret = gpio_request(handle, "reset");
        if (ret < 0) {
                pr_info("GPIO pin is already allocated\n");
-               return;
+               return ret;
        }
 
        /* get current setup value */
@@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void)
 
        pr_info("RESET: Registered gpio device: %d, current val: %d\n",
                                                        handle, reset_val);
-       return;
+       return 0;
 err:
        gpio_free(handle);
-       return;
+       return ret;
 }
+device_initcall(of_platform_reset_gpio_probe);
 
 
 static void gpio_system_reset(void)
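The hunk above turns a manually-invoked setup hook into a device_initcall
that reports failure through its return value. The shape of the pattern,
as a sketch (have_reset_gpio() is a hypothetical availability check):

static int __init sketch_probe(void)
{
	if (!have_reset_gpio())	/* hypothetical check */
		return -ENODEV;	/* recorded by the initcall core, not fatal */
	return 0;
}
device_initcall(sketch_probe);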
index 56bcf313121fb6bd31be14dc5d9f676a132cb85c..6ab6505937921e247231f8a09fbf5ab8b463d1a5 100644 (file)
@@ -400,3 +400,5 @@ ENTRY(sys_call_table)
        .long sys_pkey_alloc
        .long sys_pkey_free
        .long sys_statx
+       .long sys_io_pgetevents
+       .long sys_rseq
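The syscall hunks must stay in sync: each entry appended to sys_call_table
gets a matching __NR_* constant, and __NR_syscalls is the highest number
plus one (400 + 1 == 401 here). A build-time check in this spirit (a
sketch, not part of the patch):

static inline void sketch_check_syscall_count(void)
{
	BUILD_BUG_ON(__NR_rseq + 1 != __NR_syscalls);
}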
index 7de941cbbd940fb7ca72840e3bf6422993c6eb94..a6683484b3a12690c517ccf5803ac64f03f16c81 100644 (file)
@@ -156,9 +156,6 @@ static inline void timer_ack(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_xilinx_timer;
-#ifdef CONFIG_HEART_BEAT
-       microblaze_heartbeat();
-#endif
        timer_ack();
        evt->event_handler(evt);
        return IRQ_HANDLED;
@@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
                return ret;
        }
 
-#ifdef CONFIG_HEART_BEAT
-       microblaze_setup_heartbeat();
-#endif
-
        ret = xilinx_clocksource_init();
        if (ret)
                return ret;
index 3f9deec70b92383130b847ef3d9585db5134675e..642a56e2a1ea1be7611a56c3a3a76f96828614d3 100644 (file)
@@ -16,6 +16,7 @@ config MIPS
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select CPU_PM if CPU_IDLE
+       select DMA_DIRECT_OPS
        select GENERIC_ATOMIC64 if !64BIT
        select GENERIC_CLOCKEVENTS
        select GENERIC_CMOS_UPDATE
@@ -65,6 +66,7 @@ config MIPS
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
@@ -96,6 +98,7 @@ config MIPS_GENERIC
        select HW_HAS_PCI
        select IRQ_MIPS_CPU
        select LIBFDT
+       select MIPS_AUTO_PFN_OFFSET
        select MIPS_CPU_SCACHE
        select MIPS_GIC
        select MIPS_L1_CACHE_SHIFT_7
@@ -192,6 +195,7 @@ config ATH79
        select CSRC_R4K
        select DMA_NONCOHERENT
        select GPIOLIB
+       select PINCTRL
        select HAVE_CLK
        select COMMON_CLK
        select CLKDEV_LOOKUP
@@ -210,6 +214,8 @@ config ATH79
 
 config BMIPS_GENERIC
        bool "Broadcom Generic BMIPS kernel"
+       select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+       select ARCH_HAS_PHYS_TO_DMA
        select BOOT_RAW
        select NO_EXCEPT_FILL
        select USE_OF
@@ -437,7 +443,6 @@ config MACH_LOONGSON32
 
 config MACH_LOONGSON64
        bool "Loongson-2/3 family of machines"
-       select ARCH_HAS_PHYS_TO_DMA
        select SYS_SUPPORTS_ZBOOT
        help
          This enables the support of Loongson-2/3 family of machines.
@@ -661,11 +666,11 @@ config SGI_IP22
 
 config SGI_IP27
        bool "SGI IP27 (Origin200/2000)"
+       select ARCH_HAS_PHYS_TO_DMA
        select FW_ARC
        select FW_ARC64
        select BOOT_ELF64
        select DEFAULT_SGI_PARTITION
-       select DMA_COHERENT
        select SYS_HAS_EARLY_PRINTK
        select HW_HAS_PCI
        select NR_CPUS_DEFAULT_64
@@ -720,6 +725,7 @@ config SGI_IP28
 
 config SGI_IP32
        bool "SGI IP32 (O2)"
+       select ARCH_HAS_PHYS_TO_DMA
        select FW_ARC
        select FW_ARC32
        select BOOT_ELF32
@@ -742,7 +748,6 @@ config SGI_IP32
 config SIBYTE_CRHINE
        bool "Sibyte BCM91120C-CRhine"
        select BOOT_ELF32
-       select DMA_COHERENT
        select SIBYTE_BCM1120
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_SB1
@@ -752,7 +757,6 @@ config SIBYTE_CRHINE
 config SIBYTE_CARMEL
        bool "Sibyte BCM91120x-Carmel"
        select BOOT_ELF32
-       select DMA_COHERENT
        select SIBYTE_BCM1120
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_SB1
@@ -762,7 +766,6 @@ config SIBYTE_CARMEL
 config SIBYTE_CRHONE
        bool "Sibyte BCM91125C-CRhone"
        select BOOT_ELF32
-       select DMA_COHERENT
        select SIBYTE_BCM1125
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_SB1
@@ -773,7 +776,6 @@ config SIBYTE_CRHONE
 config SIBYTE_RHONE
        bool "Sibyte BCM91125E-Rhone"
        select BOOT_ELF32
-       select DMA_COHERENT
        select SIBYTE_BCM1125H
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_SB1
@@ -783,7 +785,6 @@ config SIBYTE_RHONE
 config SIBYTE_SWARM
        bool "Sibyte BCM91250A-SWARM"
        select BOOT_ELF32
-       select DMA_COHERENT
        select HAVE_PATA_PLATFORM
        select SIBYTE_SB1250
        select SWAP_IO_SPACE
@@ -796,7 +797,6 @@ config SIBYTE_SWARM
 config SIBYTE_LITTLESUR
        bool "Sibyte BCM91250C2-LittleSur"
        select BOOT_ELF32
-       select DMA_COHERENT
        select HAVE_PATA_PLATFORM
        select SIBYTE_SB1250
        select SWAP_IO_SPACE
@@ -808,7 +808,6 @@ config SIBYTE_LITTLESUR
 config SIBYTE_SENTOSA
        bool "Sibyte BCM91250E-Sentosa"
        select BOOT_ELF32
-       select DMA_COHERENT
        select SIBYTE_SB1250
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_SB1
@@ -818,7 +817,6 @@ config SIBYTE_SENTOSA
 config SIBYTE_BIGSUR
        bool "Sibyte BCM91480B-BigSur"
        select BOOT_ELF32
-       select DMA_COHERENT
        select NR_CPUS_DEFAULT_4
        select SIBYTE_BCM1x80
        select SWAP_IO_SPACE
@@ -894,8 +892,8 @@ config CAVIUM_OCTEON_SOC
        bool "Cavium Networks Octeon SoC based boards"
        select CEVT_R4K
        select ARCH_HAS_PHYS_TO_DMA
+       select HAS_RAPIDIO
        select PHYS_ADDR_T_64BIT
-       select DMA_COHERENT
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
        select EDAC_SUPPORT
@@ -944,7 +942,6 @@ config NLM_XLR_BOARD
        select PHYS_ADDR_T_64BIT
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_HIGHMEM
-       select DMA_COHERENT
        select NR_CPUS_DEFAULT_32
        select CEVT_R4K
        select CSRC_R4K
@@ -972,7 +969,6 @@ config NLM_XLP_BOARD
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_HIGHMEM
-       select DMA_COHERENT
        select NR_CPUS_DEFAULT_32
        select CEVT_R4K
        select CSRC_R4K
@@ -991,7 +987,6 @@ config MIPS_PARAVIRT
        bool "Para-Virtualized guest system"
        select CEVT_R4K
        select CSRC_R4K
-       select DMA_COHERENT
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
@@ -1117,12 +1112,14 @@ config DMA_PERDEV_COHERENT
        bool
        select DMA_MAYBE_COHERENT
 
-config DMA_COHERENT
-       bool
-
 config DMA_NONCOHERENT
        bool
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select ARCH_HAS_SYNC_DMA_FOR_CPU
        select NEED_DMA_MAP_STATE
+       select DMA_NONCOHERENT_MMAP
+       select DMA_NONCOHERENT_CACHE_SYNC
+       select DMA_NONCOHERENT_OPS
 
 config SYS_HAS_EARLY_PRINTK
        bool
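Selecting ARCH_HAS_SYNC_DMA_FOR_DEVICE and ARCH_HAS_SYNC_DMA_FOR_CPU above
commits the architecture to providing the corresponding cache-maintenance
hooks for the common dma-noncoherent code. Their expected shape, body
elided (a sketch; MIPS supplies the real ones elsewhere in this series):

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* write back and/or invalidate CPU caches covering the buffer */
}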
@@ -1364,6 +1361,7 @@ choice
 config CPU_LOONGSON3
        bool "Loongson 3 CPU"
        depends on SYS_HAS_CPU_LOONGSON3
+       select ARCH_HAS_PHYS_TO_DMA
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_HUGEPAGES
@@ -1426,7 +1424,8 @@ config CPU_LOONGSON1B
        select LEDS_GPIO_REGISTER
        help
          The Loongson 1B is a 32-bit SoC, which implements the MIPS32
-         release 2 instruction set.
+         Release 1 instruction set and part of the MIPS32 Release 2
+         instruction set.
 
 config CPU_LOONGSON1C
        bool "Loongson 1C"
@@ -1435,7 +1434,8 @@ config CPU_LOONGSON1C
        select LEDS_GPIO_REGISTER
        help
          The Loongson 1C is a 32-bit SoC, which implements the MIPS32
-         release 2 instruction set.
+         Release 1 instruction set and part of the MIPS32 Release 2
+         instruction set.
 
 config CPU_MIPS32_R1
        bool "MIPS32 Release 1"
@@ -1830,11 +1830,12 @@ config CPU_LOONGSON2
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_HUGEPAGES
+       select ARCH_HAS_PHYS_TO_DMA
 
 config CPU_LOONGSON1
        bool
        select CPU_MIPS32
-       select CPU_MIPSR2
+       select CPU_MIPSR1
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
@@ -1978,12 +1979,6 @@ config SYS_HAS_CPU_XLR
 config SYS_HAS_CPU_XLP
        bool
 
-config MIPS_MALTA_PM
-       depends on MIPS_MALTA
-       depends on PCI
-       bool
-       default y
-
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -2993,6 +2988,9 @@ config PGTABLE_LEVELS
        default 3 if 64BIT && !PAGE_SIZE_64KB
        default 2
 
+config MIPS_AUTO_PFN_OFFSET
+       bool
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -3114,10 +3112,13 @@ config ZONE_DMA32
 
 source "drivers/pcmcia/Kconfig"
 
+config HAS_RAPIDIO
+       bool
+       default n
+
 config RAPIDIO
        tristate "RapidIO support"
-       depends on PCI
-       default n
+       depends on HAS_RAPIDIO || PCI
        help
          If you say Y here, the kernel will include drivers and
          infrastructure code to support RapidIO interconnect devices.
index e2122cca4ae2ea3a5e383411a872848e1fcc56a7..5425df002a6b11965b9c22273a1b20ba128afbd6 100644 (file)
@@ -122,12 +122,22 @@ cflags-y += -ffreestanding
 # are used, so we kludge that here.  A bug has been filed at
 # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
 #
+# clang doesn't suffer from these issues and our checks against -dumpmachine
+# don't work so well when cross compiling, since without providing --target
+# clang's output will be based upon the build machine. So for clang we simply
+# unconditionally specify -EB or -EL as appropriate.
+#
+ifeq ($(cc-name),clang)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -EB
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -EL
+else
 undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
 undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
 predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
 predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
+endif
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
@@ -155,15 +165,11 @@ cflags-$(CONFIG_CPU_R4300)        += -march=r4300 -Wa,--trap
 cflags-$(CONFIG_CPU_VR41XX)    += -march=r4100 -Wa,--trap
 cflags-$(CONFIG_CPU_R4X00)     += -march=r4600 -Wa,--trap
 cflags-$(CONFIG_CPU_TX49XX)    += -march=r4600 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-                       -Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-                       -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-                       -Wa,-mips64 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-                       -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)     += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)     += $(call cc-option,-march=r5400,-march=r5000) \
index fa75d75b5ba9177f669e6f61f6395a6db28563d0..ddff9a02513d57e8b86e1c862cac6810ee7976b7 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/bootinfo.h>
 #include <asm/idle.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
@@ -60,7 +61,7 @@ void __init prom_init(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
 {
        alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
 }
index aab55aaf3d62a10c5e9882f4fe56f6a32c9d2a39..d625e6f99ae78eefa8c3d15e105134d13f6152c9 100644 (file)
@@ -31,6 +31,7 @@
 #include <mtd/mtd-abi.h>
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
@@ -58,7 +59,7 @@ void __init prom_init(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
 {
        alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
 }
index 0fc53e08a894c94671d2b77d3af963b4ef4bef7f..5f05b8714385d062c8d744b3f324b6169751738a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pm.h>
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <prom.h>
 
@@ -55,7 +56,7 @@ void __init prom_init(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
 {
        alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
 }
index 203854ddd1bb34df9ff90eae917c72515524aa41..8d4b65c3268a5eef08f8fe10518af69468bc2b00 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/bootinfo.h>
 #include <asm/idle.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-db1x00/bcsr.h>
 
@@ -36,7 +37,7 @@ void __init prom_init(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
 {
        if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
                alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
index 0137656107a9c5b58c2dcdcaa6bac2132fb1957d..6b64fd96dba8fb2625c62d3f59ed96e6207b3ff0 100644 (file)
@@ -476,3 +476,32 @@ void __init ar7_init_clocks(void)
        /* adjust vbus clock rate */
        vbus_clk.rate = bus_clk.rate / 2;
 }
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       WARN_ON(clk);
+       return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+       WARN_ON(clk);
+       return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       WARN_ON(clk);
+       return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+       WARN_ON(clk);
+       return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
index dd53987a690ffdc12c5f87fd8ebade8d156faab5..2ec8d9ac91ec146107165de974fd095a11e7c5ab 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/string.h>
 #include <linux/io.h>
 #include <asm/bootinfo.h>
+#include <asm/setup.h>
 
 #include <asm/mach-ar7/ar7.h>
 #include <asm/mach-ar7/prom.h>
@@ -259,10 +260,9 @@ static inline void serial_out(int offset, int value)
        writel(value, (void *)PORT(offset));
 }
 
-int prom_putchar(char c)
+void prom_putchar(char c)
 {
        while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
                ;
        serial_out(UART_TX, c);
-       return 1;
 }
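This series settles every platform on the same early-console contract,
void prom_putchar(char c), now declared once in asm/setup.h. The
busy-wait transmit idiom above, reduced to its core (a sketch;
uart_tx_ready() and uart_tx() are hypothetical helpers):

static void sketch_putchar(char c)
{
	while (!uart_tx_ready())
		;		/* spin until the transmit FIFO drains */
	uart_tx(c);
}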
index 7070b4bcd01dd566d275e600da70f24e08c2140c..2c1dfd06c366dfaddddd72aef453a610a350102f 100644 (file)
@@ -12,6 +12,7 @@ config SOC_AR2315
 config PCI_AR2315
        bool "Atheros AR2315 PCI controller support"
        depends on SOC_AR2315
+       select ARCH_HAS_PHYS_TO_DMA
        select HW_HAS_PCI
        select PCI
        default y
index 6d11ae581ea775bc2e919e16bdd63cdb6b06d86f..989e71015ee67ce30eafe26c6f76ab8955c42ebb 100644 (file)
@@ -146,10 +146,10 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
                        pr_info("Fixing up empty mac addresses\n");
                        config->reset_config_gpio = 0xffff;
                        config->sys_led_gpio = 0xffff;
-                       random_ether_addr(config->wlan0_mac);
+                       eth_random_addr(config->wlan0_mac);
                        config->wlan0_mac[0] &= ~0x06;
-                       random_ether_addr(config->enet0_mac);
-                       random_ether_addr(config->enet1_mac);
+                       eth_random_addr(config->enet0_mac);
+                       eth_random_addr(config->enet1_mac);
                }
        }
 
index 36035b628161cc8bb07a52bf5b14ca3dd40d43fa..d534761e9cdaafaf07a4686a4b105e72c22a3a73 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/serial_reg.h>
+#include <asm/setup.h>
 
 #include "devices.h"
 #include "ar2315_regs.h"
@@ -25,7 +26,7 @@ static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
        return __raw_readl(base + 4 * reg);
 }
 
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
 {
        static void __iomem *base;
 
@@ -38,7 +39,7 @@ void prom_putchar(unsigned char ch)
 
        while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
                ;
-       prom_uart_wr(base, UART_TX, ch);
+       prom_uart_wr(base, UART_TX, (unsigned char)ch);
        while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
                ;
 }
index 6b1000b6a6a6717a2e18032a644feef99666a9b4..cf9158e3c2d94b4ab472f05b5c9b5735b833ef65 100644 (file)
@@ -355,6 +355,91 @@ static void __init ar934x_clocks_init(void)
        iounmap(dpll_base);
 }
 
+static void __init qca953x_clocks_init(void)
+{
+       unsigned long ref_rate;
+       unsigned long cpu_rate;
+       unsigned long ddr_rate;
+       unsigned long ahb_rate;
+       u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+       u32 cpu_pll, ddr_pll;
+       u32 bootstrap;
+
+       bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
+       if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
+               ref_rate = 40 * 1000 * 1000;
+       else
+               ref_rate = 25 * 1000 * 1000;
+
+       pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
+       out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+                 QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+                 QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
+       nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
+              QCA953X_PLL_CPU_CONFIG_NINT_MASK;
+       frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+              QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+       cpu_pll = nint * ref_rate / ref_div;
+       cpu_pll += frac * (ref_rate >> 6) / ref_div;
+       cpu_pll /= (1 << out_div);
+
+       pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
+       out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+                 QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+                 QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
+       nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
+              QCA953X_PLL_DDR_CONFIG_NINT_MASK;
+       frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+              QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+       ddr_pll = nint * ref_rate / ref_div;
+       ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
+       ddr_pll /= (1 << out_div);
+
+       clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
+
+       postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+                 QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+               cpu_rate = ref_rate;
+       else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+               cpu_rate = cpu_pll / (postdiv + 1);
+       else
+               cpu_rate = ddr_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+                 QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+               ddr_rate = ref_rate;
+       else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+               ddr_rate = ddr_pll / (postdiv + 1);
+       else
+               ddr_rate = cpu_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+                 QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+               ahb_rate = ref_rate;
+       else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+               ahb_rate = ddr_pll / (postdiv + 1);
+       else
+               ahb_rate = cpu_pll / (postdiv + 1);
+
+       ath79_add_sys_clkdev("ref", ref_rate);
+       ath79_add_sys_clkdev("cpu", cpu_rate);
+       ath79_add_sys_clkdev("ddr", ddr_rate);
+       ath79_add_sys_clkdev("ahb", ahb_rate);
+
+       clk_add_alias("wdt", NULL, "ref", NULL);
+       clk_add_alias("uart", NULL, "ref", NULL);
+}
+
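A worked pass through the CPU PLL arithmetic above, with illustrative
register values (not read from a real board):

/*
 * ref = 25 MHz, ref_div = 1, nint = 24, frac = 0, out_div = 0:
 *
 *	cpu_pll  = 24 * 25 MHz / 1		= 600 MHz
 *	cpu_pll += 0 * (25 MHz >> 6) / 1	(no fractional part)
 *	cpu_pll /= (1 << 0)			= 600 MHz
 *
 * With the PLL not bypassed, CPUCLK taken from the CPU PLL and
 * CPU_POST_DIV = 0: cpu_rate = 600 MHz / (0 + 1) = 600 MHz.
 */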
 static void __init qca955x_clocks_init(void)
 {
        unsigned long ref_rate;
@@ -440,6 +525,110 @@ static void __init qca955x_clocks_init(void)
        clk_add_alias("uart", NULL, "ref", NULL);
 }
 
+static void __init qca956x_clocks_init(void)
+{
+       unsigned long ref_rate;
+       unsigned long cpu_rate;
+       unsigned long ddr_rate;
+       unsigned long ahb_rate;
+       u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
+       u32 cpu_pll, ddr_pll;
+       u32 bootstrap;
+
+       /*
+        * The QCA956x timer init workaround has to be applied right before
+        * setting up the clocks; otherwise the timer interrupt never fires
+        * and jiffies stops advancing.
+        */
+       u32 misc;
+
+       misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
+       misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
+       ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
+
+       bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
+       if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
+               ref_rate = 40 * 1000 * 1000;
+       else
+               ref_rate = 25 * 1000 * 1000;
+
+       pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
+       out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+                 QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+                 QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
+
+       pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
+       nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
+              QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
+       hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
+              QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
+       lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
+              QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
+
+       cpu_pll = nint * ref_rate / ref_div;
+       cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+       cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
+       cpu_pll /= (1 << out_div);
+
+       pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
+       out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+                 QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+                 QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
+       pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
+       nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
+              QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
+       hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
+              QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
+       lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
+              QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
+
+       ddr_pll = nint * ref_rate / ref_div;
+       ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+       ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
+       ddr_pll /= (1 << out_div);
+
+       clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
+
+       postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+                 QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+               cpu_rate = ref_rate;
+       else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
+               cpu_rate = ddr_pll / (postdiv + 1);
+       else
+               cpu_rate = cpu_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+                 QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+               ddr_rate = ref_rate;
+       else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
+               ddr_rate = cpu_pll / (postdiv + 1);
+       else
+               ddr_rate = ddr_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+                 QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+               ahb_rate = ref_rate;
+       else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+               ahb_rate = ddr_pll / (postdiv + 1);
+       else
+               ahb_rate = cpu_pll / (postdiv + 1);
+
+       ath79_add_sys_clkdev("ref", ref_rate);
+       ath79_add_sys_clkdev("cpu", cpu_rate);
+       ath79_add_sys_clkdev("ddr", ddr_rate);
+       ath79_add_sys_clkdev("ahb", ahb_rate);
+
+       clk_add_alias("wdt", NULL, "ref", NULL);
+       clk_add_alias("uart", NULL, "ref", NULL);
+}
+
 void __init ath79_clocks_init(void)
 {
        if (soc_is_ar71xx())
@@ -450,8 +639,12 @@ void __init ath79_clocks_init(void)
                ar933x_clocks_init();
        else if (soc_is_ar934x())
                ar934x_clocks_init();
+       else if (soc_is_qca953x())
+               qca953x_clocks_init();
        else if (soc_is_qca955x())
                qca955x_clocks_init();
+       else if (soc_is_qca956x() || soc_is_tp9343())
+               qca956x_clocks_init();
        else
                BUG();
 }
index 10a405d593df3b5c64fa84ce9ae27eaa7ba222df..cd6055f9e7a0b8a03450f8f8bd27a24d0504056d 100644 (file)
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-       void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+       void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
        /* Flush the DDR write buffer. */
        __raw_writel(0x1, flush_reg);
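The fix above scales a register index into a byte offset: the DDR
controller's 32-bit registers sit 4 bytes apart from the byte-addressed
base. A sketch of the addressing rule:

static void __iomem *sketch_reg_addr(void __iomem *base, u32 index)
{
	/* an unscaled index would land inside an earlier register */
	return base + index * 4;
}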
@@ -103,8 +103,12 @@ void ath79_device_reset_set(u32 mask)
                reg = AR933X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar934x())
                reg = AR934X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca953x())
+               reg = QCA953X_RESET_REG_RESET_MODULE;
        else if (soc_is_qca955x())
                reg = QCA955X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca956x() || soc_is_tp9343())
+               reg = QCA956X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
@@ -131,8 +135,12 @@ void ath79_device_reset_clear(u32 mask)
                reg = AR933X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar934x())
                reg = AR934X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca953x())
+               reg = QCA953X_RESET_REG_RESET_MODULE;
        else if (soc_is_qca955x())
                reg = QCA955X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca956x() || soc_is_tp9343())
+               reg = QCA956X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
index d1adc59af5bfeb251f436dfa4f029145152875b5..4b1063117ef72451135ff9ebb67d93e324eae0de 100644 (file)
 #include <linux/errno.h>
 #include <linux/serial_reg.h>
 #include <asm/addrspace.h>
+#include <asm/setup.h>
 
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include <asm/mach-ath79/ar933x_uart.h>
 
-static void (*_prom_putchar) (unsigned char);
+static void (*_prom_putchar)(char);
 
 static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
 {
@@ -33,31 +34,72 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
 
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
-static void prom_putchar_ar71xx(unsigned char ch)
+static void prom_putchar_ar71xx(char ch)
 {
        void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
 
        prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
-       __raw_writel(ch, base + UART_TX * 4);
+       __raw_writel((unsigned char)ch, base + UART_TX * 4);
        prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
 }
 
-static void prom_putchar_ar933x(unsigned char ch)
+static void prom_putchar_ar933x(char ch)
 {
        void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
 
        prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
                          AR933X_UART_DATA_TX_CSR);
-       __raw_writel(AR933X_UART_DATA_TX_CSR | ch, base + AR933X_UART_DATA_REG);
+       __raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
+                    base + AR933X_UART_DATA_REG);
        prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
                          AR933X_UART_DATA_TX_CSR);
 }
 
-static void prom_putchar_dummy(unsigned char ch)
+static void prom_putchar_dummy(char ch)
 {
        /* nothing to do */
 }
 
+static void prom_enable_uart(u32 id)
+{
+       void __iomem *gpio_base;
+       u32 uart_en;
+       u32 t;
+
+       switch (id) {
+       case REV_ID_MAJOR_AR71XX:
+               uart_en = AR71XX_GPIO_FUNC_UART_EN;
+               break;
+
+       case REV_ID_MAJOR_AR7240:
+       case REV_ID_MAJOR_AR7241:
+       case REV_ID_MAJOR_AR7242:
+               uart_en = AR724X_GPIO_FUNC_UART_EN;
+               break;
+
+       case REV_ID_MAJOR_AR913X:
+               uart_en = AR913X_GPIO_FUNC_UART_EN;
+               break;
+
+       case REV_ID_MAJOR_AR9330:
+       case REV_ID_MAJOR_AR9331:
+               uart_en = AR933X_GPIO_FUNC_UART_EN;
+               break;
+
+       case REV_ID_MAJOR_AR9341:
+       case REV_ID_MAJOR_AR9342:
+       case REV_ID_MAJOR_AR9344:
+               /* TODO */
+       default:
+               return;
+       }
+
+       gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
+       t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
+       t |= uart_en;
+       __raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
+}
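
prom_enable_uart() above is a classic read-modify-write: it ORs the SoC-specific UART-enable bit into the shared GPIO function register so the UART pins are routed before the first character goes out, while leaving every other pin assignment untouched. The pattern in isolation (a sketch; mmio_set_bits is not a kernel API):

#include <stdint.h>

static inline void mmio_set_bits(volatile uint32_t *reg, uint32_t bits)
{
	uint32_t t = *reg;	/* read the current pin-function routing */

	t |= bits;		/* set only the UART-enable bit(s) */
	*reg = t;		/* write back, preserving all other bits */
}

At this point in boot nothing else can race on the register, so the read-modify-write needs no locking.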
+
 static void prom_putchar_init(void)
 {
        void __iomem *base;
@@ -76,8 +118,12 @@ static void prom_putchar_init(void)
        case REV_ID_MAJOR_AR9341:
        case REV_ID_MAJOR_AR9342:
        case REV_ID_MAJOR_AR9344:
+       case REV_ID_MAJOR_QCA9533:
+       case REV_ID_MAJOR_QCA9533_V2:
        case REV_ID_MAJOR_QCA9556:
        case REV_ID_MAJOR_QCA9558:
+       case REV_ID_MAJOR_TP9343:
+       case REV_ID_MAJOR_QCA956X:
                _prom_putchar = prom_putchar_ar71xx;
                break;
 
@@ -88,11 +134,13 @@ static void prom_putchar_init(void)
 
        default:
                _prom_putchar = prom_putchar_dummy;
-               break;
+               return;
        }
+
+       prom_enable_uart(id);
 }
 
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
 {
        if (!_prom_putchar)
                prom_putchar_init();
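
prom_putchar() binds its backend lazily: the first call runs prom_putchar_init(), which switches on the revision ID and also muxes the UART pins (note the default case now returns instead of breaking, so prom_enable_uart() only runs when a real UART backend was chosen). A stripped-down sketch of the lazy-dispatch shape, with hypothetical names:

#include <stdio.h>

static void putchar_real(char ch) { putc(ch, stdout); }
static void (*putchar_fn)(char);

static void putchar_init(void)
{
	/* the kernel picks a SoC-specific backend here */
	putchar_fn = putchar_real;
}

void putchar_early(char ch)
{
	if (!putchar_fn)	/* first call: bind the implementation */
		putchar_init();
	putchar_fn(ch);
}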
index 6b2c6f3baefa556018dffea409500b1c7846ed77..75fb96ca61db7ef6652722640a8d452cbc125d55 100644 (file)
@@ -34,7 +34,7 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL    (3 * PB44_KEYS_POLL_INTERVAL)
 
 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
-       .dev_id = "i2c-gpio",
+       .dev_id = "i2c-gpio.0",
        .table = {
                GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
                                NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
index f206dafbb0a35f57ccd64bfbefd24bbe112ff4f7..4c7a93f4039a0cba2a18504525ed9cd2e0c02389 100644 (file)
@@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
 
 static void ath79_restart(char *command)
 {
+       local_irq_disable();
        ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
        for (;;)
                if (cpu_wait)
@@ -59,6 +60,7 @@ static void __init ath79_detect_sys_type(void)
        u32 major;
        u32 minor;
        u32 rev = 0;
+       u32 ver = 1;
 
        id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
        major = id & REV_ID_MAJOR_MASK;
@@ -151,6 +153,17 @@ static void __init ath79_detect_sys_type(void)
                rev = id & AR934X_REV_ID_REVISION_MASK;
                break;
 
+       case REV_ID_MAJOR_QCA9533_V2:
+               ver = 2;
+               ath79_soc_rev = 2;
+               /* fall through */
+
+       case REV_ID_MAJOR_QCA9533:
+               ath79_soc = ATH79_SOC_QCA9533;
+               chip = "9533";
+               rev = id & QCA953X_REV_ID_REVISION_MASK;
+               break;
+
        case REV_ID_MAJOR_QCA9556:
                ath79_soc = ATH79_SOC_QCA9556;
                chip = "9556";
@@ -163,14 +176,30 @@ static void __init ath79_detect_sys_type(void)
                rev = id & QCA955X_REV_ID_REVISION_MASK;
                break;
 
+       case REV_ID_MAJOR_QCA956X:
+               ath79_soc = ATH79_SOC_QCA956X;
+               chip = "956X";
+               rev = id & QCA956X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_TP9343:
+               ath79_soc = ATH79_SOC_TP9343;
+               chip = "9343";
+               rev = id & QCA956X_REV_ID_REVISION_MASK;
+               break;
+
        default:
                panic("ath79: unknown SoC, id:0x%08x", id);
        }
 
-       ath79_soc_rev = rev;
+       if (ver == 1)
+               ath79_soc_rev = rev;
 
-       if (soc_is_qca955x())
-               sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+       if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
+               sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
+                       chip, ver, rev);
+       else if (soc_is_tp9343())
+               sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
                        chip, rev);
        else
                sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
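
The QCA9533 v2 handling above relies on the case fall-through: v2 silicon reports its own major ID, so that label sets ver and ath79_soc_rev, then drops into the common QCA9533 case for the chip name and revision mask. A standalone sketch of the decode, with hypothetical mask values standing in for the REV_ID_* constants:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical field layout; the real masks live in ar71xx_regs.h. */
#define MAJOR_MASK		0xfff0u
#define REV_MASK		0x000fu
#define MAJOR_QCA9533		0x0140u
#define MAJOR_QCA9533_V2	0x0160u

static void decode(uint32_t id)
{
	unsigned int ver = 1;

	switch (id & MAJOR_MASK) {
	case MAJOR_QCA9533_V2:
		ver = 2;
		/* fall through: v2 shares everything else with v1 */
	case MAJOR_QCA9533:
		printf("QCA9533 ver %u rev %u\n",
		       ver, (unsigned int)(id & REV_MASK));
		break;
	default:
		printf("unknown SoC id 0x%08" PRIx32 "\n", id);
	}
}

int main(void)
{
	decode(0x0163);		/* prints: QCA9533 ver 2 rev 3 */
	return 0;
}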
index 6092226a6d766e07bb4f5bc9ae47a0be341de6e7..9e9ec27c282f8209254a61a16885f78f30105825 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <bcm63xx_io.h>
 #include <linux/serial_bcm63xx.h>
+#include <asm/setup.h>
 
 static void wait_xfered(void)
 {
index 6dec30842b2f44515b3e272e94eb31d88dce976a..3d13c77c125f4a8b7fa7097446e7c550f43ecc19 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <dma-coherence.h>
+#include <asm/bmips.h>
 
 /*
  * BCM338x has configurable address translation windows which allow the
@@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
 
 #define FLUSH_RAC              0x100
 
-static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
 {
        struct bmips_dma_range *r;
 
@@ -52,17 +52,7 @@ static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
        return pa;
 }
 
-dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
-       return bmips_phys_to_dma(dev, virt_to_phys(addr));
-}
-
-dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
-       return bmips_phys_to_dma(dev, page_to_phys(page));
-}
-
-unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
        struct bmips_dma_range *r;
 
@@ -74,6 +64,22 @@ unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
        return dma_addr;
 }
 
+void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+       void __iomem *cbr = BMIPS_GET_CBR();
+       u32 cfg;
+
+       if (boot_cpu_type() != CPU_BMIPS3300 &&
+           boot_cpu_type() != CPU_BMIPS4350 &&
+           boot_cpu_type() != CPU_BMIPS4380)
+               return;
+
+       /* Flush stale data out of the readahead cache */
+       cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+       __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+       __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
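
In arch_sync_dma_for_cpu_all() above, the final __raw_readl() is deliberate: MMIO writes may be posted, so reading the register back forces the FLUSH_RAC write (bit 0x100) to complete before the CPU touches the DMA'd data. The idiom in isolation (a sketch; mmio_write_flush is not a kernel API):

#include <stdint.h>

static inline void mmio_write_flush(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* may sit in a posted-write buffer */
	(void)*reg;	/* readback stalls until the write has landed */
}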
+
 static int __init bmips_init_dma_ranges(void)
 {
        struct device_node *np =
index 3b6f687f177cdf5b3826e10978a1bd465ed2a96e..231fc5ce375e7b8c7eee03f3d5486ef823627be7 100644 (file)
@@ -202,13 +202,6 @@ void __init device_tree_init(void)
        of_node_put(np);
 }
 
-int __init plat_of_setup(void)
-{
-       return __dt_register_buses("simple-bus", NULL);
-}
-
-arch_initcall(plat_of_setup);
-
 static int __init plat_dev_init(void)
 {
        of_clk_init(NULL);
index c22da16d67b82f7752f6ba39291e2acd4e18fb47..35704c28a28b45e7edf6812ec7772251b63cbb52 100644 (file)
@@ -105,28 +105,29 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
 # Flattened Image Tree (.itb) images
 #
 
-targets += vmlinux.itb
-targets += vmlinux.gz.itb
-targets += vmlinux.bz2.itb
-targets += vmlinux.lzma.itb
-targets += vmlinux.lzo.itb
-
 ifeq ($(ADDR_BITS),32)
-       itb_addr_cells = 1
+itb_addr_cells = 1
 endif
 ifeq ($(ADDR_BITS),64)
-       itb_addr_cells = 2
+itb_addr_cells = 2
 endif
 
+targets += vmlinux.its.S
+
 quiet_cmd_its_cat = CAT     $@
-      cmd_its_cat = cat $^ >$@
+      cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
 
-$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS))
+$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
        $(call if_changed,its_cat)
 
+targets += vmlinux.its
+targets += vmlinux.gz.its
+targets += vmlinux.bz2.its
+targets += vmlinux.lzma.its
+targets += vmlinux.lzo.its
+
 quiet_cmd_cpp_its_S = ITS     $@
-      cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
-                       -D__ASSEMBLY__ \
+      cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \
                        -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
                        -DVMLINUX_BINARY="\"$(3)\"" \
                        -DVMLINUX_COMPRESSION="\"$(2)\"" \
@@ -136,19 +137,25 @@ quiet_cmd_cpp_its_S = ITS     $@
                        -DADDR_CELLS=$(itb_addr_cells)
 
 $(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
-       $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+       $(call if_changed,cpp_its_S,none,vmlinux.bin)
 
 $(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
-       $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+       $(call if_changed,cpp_its_S,gzip,vmlinux.bin.gz)
 
 $(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX)  FORCE
-       $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+       $(call if_changed,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
 $(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
-       $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+       $(call if_changed,cpp_its_S,lzma,vmlinux.bin.lzma)
 
 $(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
-       $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+       $(call if_changed,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
 
 quiet_cmd_itb-image = ITB     $@
       cmd_itb-image = \
@@ -162,14 +169,5 @@ quiet_cmd_itb-image = ITB     $@
 $(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
        $(call if_changed,itb-image,$<)
 
-$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
-       $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
-       $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
-       $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
+$(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE
        $(call if_changed,itb-image,$<)
index d6f0fee0a151a62329738428395d5e84bbc6029d..a8a0a32e05d1b3139f6b108556ae75d6694d21ec 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-
-extern void prom_putchar(unsigned char ch);
+#include <asm/setup.h>
 
 void putc(char c)
 {
index aa4e8f75ff5d137e1e238b62e2b1044edd28b419..ce93d57f1b4d79d43ecec21174d382873caf2e10 100644 (file)
                };
        };
 
+       spi_gpio {
+               compatible = "spi-gpio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               num-chipselects = <2>;
+
+               gpio-miso = <&gpe 14 0>;
+               gpio-sck = <&gpe 15 0>;
+               gpio-mosi = <&gpe 17 0>;
+               cs-gpios = <&gpe 16 0
+                           &gpe 18 0>;
+
+               spidev@0 {
+                       compatible = "spidev";
+                       reg = <0>;
+                       spi-max-frequency = <1000000>;
+               };
+       };
+
        uart0: serial@10030000 {
                compatible = "ingenic,jz4780-uart";
                reg = <0x10030000 0x100>;
index 3c6aed9f5439f8ab8f9a33c7c4ac5601f5cf4931..9a9bb7ea05034a820ba4c752d550dc3d81c5e3a8 100644 (file)
@@ -1,3 +1,3 @@
-dtb-$(CONFIG_LEGACY_BOARD_OCELOT)      += ocelot_pcb123.dtb
+dtb-$(CONFIG_MSCC_OCELOT)      += ocelot_pcb123.dtb
 
 obj-$(CONFIG_BUILTIN_DTB)      += $(addsuffix .o, $(dtb-y))
index 4f33dbc6734824142968b1062119b50a83475557..f7eb612b46ba81348aa8311088af817dc2df5d08 100644 (file)
                        status = "disabled";
                };
 
+               spi: spi@101000 {
+                       compatible = "mscc,ocelot-spi", "snps,dw-apb-ssi";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x101000 0x100>, <0x3c 0x18>;
+                       interrupts = <9>;
+                       clocks = <&ahb_clk>;
+
+                       status = "disabled";
+               };
+
                switch@1010000 {
                        compatible = "mscc,vsc7514-switch";
                        reg = <0x1010000 0x10000>,
                        gpio-controller;
                        #gpio-cells = <2>;
                        gpio-ranges = <&gpio 0 0 22>;
+                       interrupt-controller;
+                       interrupts = <13>;
+                       #interrupt-cells = <2>;
 
                        uart_pins: uart-pins {
                                pins = "GPIO_6", "GPIO_7";
                                pins = "GPIO_12", "GPIO_13";
                                function = "uart2";
                        };
+
+                       miim1: miim1 {
+                               pins = "GPIO_14", "GPIO_15";
+                               function = "miim1";
+                       };
                };
 
                mdio0: mdio@107009c {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "mscc,ocelot-miim";
-                       reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+                       reg = <0x107009c 0x24>, <0x10700f0 0x8>;
                        interrupts = <14>;
                        status = "disabled";
 
                                reg = <3>;
                        };
                };
+
+               mdio1: mdio@10700c0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "mscc,ocelot-miim";
+                       reg = <0x10700c0 0x24>;
+                       interrupts = <15>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&miim1>;
+                       status = "disabled";
+               };
        };
 };
index 4ccd65379059910f3a142f8e69ffdf9a1c811f31..2266027759f989fe1187ddc4be42ec5ccf08cf1a 100644 (file)
        status = "okay";
 };
 
+&spi {
+       status = "okay";
+
+       flash@0 {
+               compatible = "macronix,mx25l25635f", "jedec,spi-nor";
+               spi-max-frequency = <20000000>;
+               reg = <0>;
+       };
+};
+
 &mdio0 {
        status = "okay";
 };
index 1fe561c5f90e76f5558231a8af4d59576b2ae282..61dcfa5b6ca78a843459d1c82a4d60289df11d32 100644 (file)
        usb_phy: usb-phy {
                compatible = "qca,ar7100-usb-phy";
 
-               reset-names = "usb-phy", "usb-suspend-override";
+               reset-names = "phy", "suspend-override";
                resets = <&rst 4>, <&rst 3>;
 
                #phy-cells = <0>;
index 3931033e47c834d59f0eada7c57e461882190392..7fccf6357225a5d8eeb36be31567ce9140beaca1 100644 (file)
        };
 
        gpio-keys {
-               compatible = "gpio-keys-polled";
+               compatible = "gpio-keys";
                #address-cells = <1>;
                #size-cells = <0>;
 
-               poll-interval = <20>;
                button@0 {
                        label = "reset";
                        linux,code = <KEY_RESTART>;
index efd5f07222060c8a2c9abfe4c2cc49d2f3675d73..2bae201aa365106ac262865b0ba407fe36905599 100644 (file)
        usb_phy: usb-phy {
                compatible = "qca,ar7100-usb-phy";
 
-               reset-names = "usb-phy", "usb-suspend-override";
+               reset-names = "phy", "suspend-override";
                resets = <&rst 4>, <&rst 3>;
 
                #phy-cells = <0>;
index d4e4502daaa88c0b3353763aea07a3a16498a30e..e7af2cf5f4c1991856e319c4ff4e31fbe1cceeff 100644 (file)
                };
        };
 
-       gpio-keys-polled {
-               compatible = "gpio-keys-polled";
+       gpio-keys {
+               compatible = "gpio-keys";
                #address-cells = <1>;
                #size-cells = <0>;
-               poll-interval = <100>;
 
                button@0 {
                        label = "reset";
index 4f95ccf17c4c8ae006be9c9e46af714c2d0e96b7..d38aa73f1a2e9806c5bbc4f925635b26d9d0bfae 100644 (file)
                };
        };
 
-       gpio-keys-polled {
-               compatible = "gpio-keys-polled";
+       gpio-keys {
+               compatible = "gpio-keys";
                #address-cells = <1>;
                #size-cells = <0>;
-               poll-interval = <100>;
 
                button@0 {
                        label = "jumpstart";
index f70f79c4d0d5091538dad6a48d2c0607ed1c4f87..11778abacf66ce62738c577a4779ce99aa9a9ca2 100644 (file)
                };
        };
 
-       gpio-keys-polled {
-               compatible = "gpio-keys-polled";
+       gpio-keys {
+               compatible = "gpio-keys";
                #address-cells = <1>;
                #size-cells = <0>;
-               poll-interval = <100>;
 
                button@0 {
                        label = "reset";
index 748131aea22e75ff9355f751ca8749f32d990327..c8290d36cfbe92d3a9ca2804981619680f4a5d4b 100644 (file)
                };
        };
 
-       gpio-keys-polled {
-               compatible = "gpio-keys-polled";
+       gpio-keys {
+               compatible = "gpio-keys";
                #address-cells = <1>;
                #size-cells = <0>;
-               poll-interval = <100>;
 
                button@0 {
                        label = "wps";
index b3e73c22c345cb630e69802b9313214d7cadda80..5be79ebfc3f8060d7768b663beead8d2c52b7f3b 100644 (file)
@@ -2,14 +2,17 @@
 /*
  * Some ECOFF definitions.
  */
+
+#include <stdint.h>
+
 typedef struct filehdr {
-       unsigned short  f_magic;        /* magic number */
-       unsigned short  f_nscns;        /* number of sections */
-       long            f_timdat;       /* time & date stamp */
-       long            f_symptr;       /* file pointer to symbolic header */
-       long            f_nsyms;        /* sizeof(symbolic hdr) */
-       unsigned short  f_opthdr;       /* sizeof(optional hdr) */
-       unsigned short  f_flags;        /* flags */
+       uint16_t        f_magic;        /* magic number */
+       uint16_t        f_nscns;        /* number of sections */
+       int32_t         f_timdat;       /* time & date stamp */
+       int32_t         f_symptr;       /* file pointer to symbolic header */
+       int32_t         f_nsyms;        /* sizeof(symbolic hdr) */
+       uint16_t        f_opthdr;       /* sizeof(optional hdr) */
+       uint16_t        f_flags;        /* flags */
 } FILHDR;
 #define FILHSZ sizeof(FILHDR)
 
@@ -18,32 +21,32 @@ typedef struct filehdr {
 
 typedef struct scnhdr {
        char            s_name[8];      /* section name */
-       long            s_paddr;        /* physical address, aliased s_nlib */
-       long            s_vaddr;        /* virtual address */
-       long            s_size;         /* section size */
-       long            s_scnptr;       /* file ptr to raw data for section */
-       long            s_relptr;       /* file ptr to relocation */
-       long            s_lnnoptr;      /* file ptr to gp histogram */
-       unsigned short  s_nreloc;       /* number of relocation entries */
-       unsigned short  s_nlnno;        /* number of gp histogram entries */
-       long            s_flags;        /* flags */
+       int32_t         s_paddr;        /* physical address, aliased s_nlib */
+       int32_t         s_vaddr;        /* virtual address */
+       int32_t         s_size;         /* section size */
+       int32_t         s_scnptr;       /* file ptr to raw data for section */
+       int32_t         s_relptr;       /* file ptr to relocation */
+       int32_t         s_lnnoptr;      /* file ptr to gp histogram */
+       uint16_t        s_nreloc;       /* number of relocation entries */
+       uint16_t        s_nlnno;        /* number of gp histogram entries */
+       int32_t         s_flags;        /* flags */
 } SCNHDR;
 #define SCNHSZ         sizeof(SCNHDR)
-#define SCNROUND       ((long)16)
+#define SCNROUND       ((int32_t)16)
 
 typedef struct aouthdr {
-       short   magic;          /* see above                            */
-       short   vstamp;         /* version stamp                        */
-       long    tsize;          /* text size in bytes, padded to DW bdry*/
-       long    dsize;          /* initialized data "  "                */
-       long    bsize;          /* uninitialized data "   "             */
-       long    entry;          /* entry pt.                            */
-       long    text_start;     /* base of text used for this file      */
-       long    data_start;     /* base of data used for this file      */
-       long    bss_start;      /* base of bss used for this file       */
-       long    gprmask;        /* general purpose register mask        */
-       long    cprmask[4];     /* co-processor register masks          */
-       long    gp_value;       /* the gp value used for this object    */
+       int16_t magic;          /* see above                            */
+       int16_t vstamp;         /* version stamp                        */
+       int32_t tsize;          /* text size in bytes, padded to DW bdry*/
+       int32_t dsize;          /* initialized data "  "                */
+       int32_t bsize;          /* uninitialized data "   "             */
+       int32_t entry;          /* entry pt.                            */
+       int32_t text_start;     /* base of text used for this file      */
+       int32_t data_start;     /* base of data used for this file      */
+       int32_t bss_start;      /* base of bss used for this file       */
+       int32_t gprmask;        /* general purpose register mask        */
+       int32_t cprmask[4];     /* co-processor register masks          */
+       int32_t gp_value;       /* the gp value used for this object    */
 } AOUTHDR;
 #define AOUTHSZ sizeof(AOUTHDR)
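
The long/short to int32_t/int16_t conversion above matters because ECOFF is a fixed 32-bit on-disk format and elf2ecoff is a host tool: on an LP64 build host, long is 8 bytes and the headers would be laid out wrong. A compile-time check one could append after these typedefs (a sketch; the expected values are the standard ECOFF header sizes, assuming the compiler inserts no padding):

#include <assert.h>	/* static_assert, C11 */

static_assert(sizeof(FILHDR) == 20, "ECOFF file header must be 20 bytes");
static_assert(sizeof(SCNHDR) == 40, "ECOFF section header must be 40 bytes");
static_assert(sizeof(AOUTHDR) == 56, "ECOFF a.out header must be 56 bytes");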
 
index 266c8137e859d418faed5e6fa3a8549b0aeff9df..6972b97235daf4a97a57205180ed5b63ebf98409 100644 (file)
@@ -43,6 +43,8 @@
 #include <limits.h>
 #include <netinet/in.h>
 #include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
 
 #include "ecoff.h"
 
@@ -55,8 +57,8 @@
 /* -------------------------------------------------------------------- */
 
 struct sect {
-       unsigned long vaddr;
-       unsigned long len;
+       uint32_t vaddr;
+       uint32_t len;
 };
 
 int *symTypeTable;
@@ -153,16 +155,16 @@ static char *saveRead(int file, off_t offset, off_t len, char *name)
 }
 
 #define swab16(x) \
-       ((unsigned short)( \
-               (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \
-               (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) ))
+       ((uint16_t)( \
+               (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
+               (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ))
 
 #define swab32(x) \
        ((unsigned int)( \
-               (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
-               (((unsigned int)(x) & (unsigned int)0x0000ff00UL) <<  8) | \
-               (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >>  8) | \
-               (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
+               (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
+               (((uint32_t)(x) & (uint32_t)0x0000ff00UL) <<  8) | \
+               (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >>  8) | \
+               (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ))
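
The swab macros above let elf2ecoff run on a build host of either endianness by byte-reversing each fixed-width field. A standalone self-check of what they compute (the macro bodies are repeated so this compiles on its own):

#include <assert.h>
#include <stdint.h>

#define swab16(x) \
	((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
		    (((uint16_t)(x) & 0xff00U) >> 8)))
#define swab32(x) \
	((uint32_t)((((uint32_t)(x) & 0x000000ffUL) << 24) | \
		    (((uint32_t)(x) & 0x0000ff00UL) <<  8) | \
		    (((uint32_t)(x) & 0x00ff0000UL) >>  8) | \
		    (((uint32_t)(x) & 0xff000000UL) >> 24)))

int main(void)
{
	assert(swab16(0x1234) == 0x3412);		/* bytes reversed */
	assert(swab32(0x11223344UL) == 0x44332211UL);
	return 0;
}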
 
 static void convert_elf_hdr(Elf32_Ehdr * e)
 {
@@ -274,7 +276,7 @@ int main(int argc, char *argv[])
        struct aouthdr eah;
        struct scnhdr esecs[6];
        int infile, outfile;
-       unsigned long cur_vma = ULONG_MAX;
+       uint32_t cur_vma = UINT32_MAX;
        int addflag = 0;
        int nosecs;
 
@@ -518,7 +520,7 @@ int main(int argc, char *argv[])
 
                for (i = 0; i < nosecs; i++) {
                        printf
-                           ("Section %d: %s phys %lx  size %lx  file offset %lx\n",
+                           ("Section %d: %s phys %"PRIx32"  size %"PRIx32"\t file offset %"PRIx32"\n",
                             i, esecs[i].s_name, esecs[i].s_paddr,
                             esecs[i].s_size, esecs[i].s_scnptr);
                }
@@ -564,17 +566,16 @@ int main(int argc, char *argv[])
                   the section can be loaded before copying. */
                if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
                        if (cur_vma != ph[i].p_vaddr) {
-                               unsigned long gap =
-                                   ph[i].p_vaddr - cur_vma;
+                               uint32_t gap = ph[i].p_vaddr - cur_vma;
                                char obuf[1024];
                                if (gap > 65536) {
                                        fprintf(stderr,
-                                               "Intersegment gap (%ld bytes) too large.\n",
+                                               "Intersegment gap (%"PRIu32" bytes) too large.\n",
                                                gap);
                                        exit(1);
                                }
                                fprintf(stderr,
-                                       "Warning: %ld byte intersegment gap.\n",
+                                       "Warning: %"PRIu32" byte intersegment gap.\n",
                                        gap);
                                memset(obuf, 0, sizeof obuf);
                                while (gap) {
index 7b335ab2169748abd280b3ae9eed9312ac4edb73..236833be6fbe6d2a64c2e2a6386385c81db05bd3 100644 (file)
@@ -11,9 +11,7 @@
  * Copyright (C) 2010 Cavium Networks, Inc.
  */
 #include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
 #include <linux/bootmem.h>
-#include <linux/export.h>
 #include <linux/swiotlb.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <asm/octeon/octeon.h>
 
 #ifdef CONFIG_PCI
+#include <linux/pci.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include <asm/octeon/cvmx-pci-defs.h>
 
+struct octeon_dma_map_ops {
+       dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+       phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
 static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
 {
        if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
@@ -61,6 +65,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
        return daddr;
 }
 
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+       .phys_to_dma    = octeon_gen1_phys_to_dma,
+       .dma_to_phys    = octeon_gen1_dma_to_phys,
+};
+
 static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
        return octeon_hole_phys_to_dma(paddr);
@@ -71,6 +80,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
        return octeon_hole_dma_to_phys(daddr);
 }
 
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+       .phys_to_dma    = octeon_gen2_phys_to_dma,
+       .dma_to_phys    = octeon_gen2_dma_to_phys,
+};
+
 static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
        if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -93,6 +107,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
        return daddr;
 }
 
+static const struct octeon_dma_map_ops octeon_big_ops = {
+       .phys_to_dma    = octeon_big_phys_to_dma,
+       .dma_to_phys    = octeon_big_dma_to_phys,
+};
+
 static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
                                           phys_addr_t paddr)
 {
@@ -121,105 +140,51 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
        return daddr;
 }
 
-#endif /* CONFIG_PCI */
-
-static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction,
-       unsigned long attrs)
-{
-       dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
-                                           direction, attrs);
-       mb();
-
-       return daddr;
-}
-
-static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
-       int nents, enum dma_data_direction direction, unsigned long attrs)
-{
-       int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
-       mb();
-       return r;
-}
-
-static void octeon_dma_sync_single_for_device(struct device *dev,
-       dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-       swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
-       mb();
-}
-
-static void octeon_dma_sync_sg_for_device(struct device *dev,
-       struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-       swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
-       mb();
-}
-
-static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-       void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
-       mb();
+static const struct octeon_dma_map_ops octeon_small_ops = {
+       .phys_to_dma    = octeon_small_phys_to_dma,
+       .dma_to_phys    = octeon_small_dma_to_phys,
+};
 
-       return ret;
-}
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
 
-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-       return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+void __init octeon_pci_dma_init(void)
 {
-       return daddr;
+       switch (octeon_dma_bar_type) {
+       case OCTEON_DMA_BAR_TYPE_PCIE:
+               octeon_pci_dma_ops = &octeon_gen1_ops;
+               break;
+       case OCTEON_DMA_BAR_TYPE_PCIE2:
+               octeon_pci_dma_ops = &octeon_gen2_ops;
+               break;
+       case OCTEON_DMA_BAR_TYPE_BIG:
+               octeon_pci_dma_ops = &octeon_big_ops;
+               break;
+       case OCTEON_DMA_BAR_TYPE_SMALL:
+               octeon_pci_dma_ops = &octeon_small_ops;
+               break;
+       default:
+               BUG();
+       }
 }
-
-struct octeon_dma_map_ops {
-       const struct dma_map_ops dma_map_ops;
-       dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
-       phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
+#endif /* CONFIG_PCI */
 
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-       struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-                                                     struct octeon_dma_map_ops,
-                                                     dma_map_ops);
-
-       return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+       if (dev && dev_is_pci(dev))
+               return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+       return paddr;
 }
-EXPORT_SYMBOL(__phys_to_dma);
 
 phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-       struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-                                                     struct octeon_dma_map_ops,
-                                                     dma_map_ops);
-
-       return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+       if (dev && dev_is_pci(dev))
+               return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+       return daddr;
 }
-EXPORT_SYMBOL(__dma_to_phys);
-
-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
-       .dma_map_ops = {
-               .alloc = octeon_dma_alloc_coherent,
-               .free = swiotlb_free,
-               .map_page = octeon_dma_map_page,
-               .unmap_page = swiotlb_unmap_page,
-               .map_sg = octeon_dma_map_sg,
-               .unmap_sg = swiotlb_unmap_sg_attrs,
-               .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-               .sync_single_for_device = octeon_dma_sync_single_for_device,
-               .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-               .sync_sg_for_device = octeon_dma_sync_sg_for_device,
-               .mapping_error = swiotlb_dma_mapping_error,
-               .dma_supported = swiotlb_dma_supported
-       },
-       .phys_to_dma = octeon_unity_phys_to_dma,
-       .dma_to_phys = octeon_unity_dma_to_phys
-};
 
 char *octeon_swiotlb;
 
@@ -283,52 +248,4 @@ void __init plat_swiotlb_setup(void)
 
        if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
                panic("Cannot allocate SWIOTLB buffer");
-
-       mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
 }
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
-       .dma_map_ops = {
-               .alloc = octeon_dma_alloc_coherent,
-               .free = swiotlb_free,
-               .map_page = octeon_dma_map_page,
-               .unmap_page = swiotlb_unmap_page,
-               .map_sg = octeon_dma_map_sg,
-               .unmap_sg = swiotlb_unmap_sg_attrs,
-               .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-               .sync_single_for_device = octeon_dma_sync_single_for_device,
-               .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-               .sync_sg_for_device = octeon_dma_sync_sg_for_device,
-               .mapping_error = swiotlb_dma_mapping_error,
-               .dma_supported = swiotlb_dma_supported
-       },
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
-       switch (octeon_dma_bar_type) {
-       case OCTEON_DMA_BAR_TYPE_PCIE2:
-               _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
-               _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
-               break;
-       case OCTEON_DMA_BAR_TYPE_PCIE:
-               _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
-               _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
-               break;
-       case OCTEON_DMA_BAR_TYPE_BIG:
-               _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
-               _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
-               break;
-       case OCTEON_DMA_BAR_TYPE_SMALL:
-               _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
-               _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
-               break;
-       default:
-               BUG();
-       }
-       octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
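
The net effect of the octeon DMA rework above: instead of embedding dma_map_ops in a wrapper struct and recovering the address-translation hooks via container_of(), the driver keeps one function-pointer table, selected once at boot from octeon_dma_bar_type, and __phys_to_dma()/__dma_to_phys() consult it only for PCI devices, falling back to the identity mapping otherwise. The dispatch shape, condensed (a sketch with hypothetical names; is_pci_dev stands in for dev_is_pci()):

#include <stdint.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;
struct device;

struct addr_map_ops {
	dma_addr_t  (*phys_to_dma)(struct device *dev, phys_addr_t pa);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t da);
};

static const struct addr_map_ops *pci_map_ops;	/* chosen once at boot */

static int is_pci_dev(struct device *dev) { return dev != NULL; }

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
{
	if (pci_map_ops && is_pci_dev(dev))	/* per-BAR-type translation */
		return pci_map_ops->phys_to_dma(dev, pa);
	return pa;				/* identity for everyone else */
}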
index d18ed5af62f4a974a67cdc82dd890d503af99792..b8898e2b8a6fad2e7c6b58b6e2aeec3d26d7423a 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -42,9 +42,6 @@
 #include <asm/octeon/cvmx-asxx-defs.h>
 #include <asm/octeon/cvmx-dbg-defs.h>
 
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
 /**
  * Probe RGMII ports and determine the number present
  *
index 57828335077671607003f09e30b9a5331b2325af..a176358c5a21527f6c9f679f1c359a7624c804bb 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
 
 #include <asm/octeon/cvmx-gmxx-defs.h>
 #include <asm/octeon/cvmx-pcsx-defs.h>
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+#include <asm/octeon/cvmx-pcsxx-defs.h>
 
 /**
  * Perform initialization required only once for an SGMII port.
index ef16aa00167bbee0cef51515ba71d02eede8c3aa..2a574d2726711501b7412dfd7a1e1f18ac8ceb78 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
  * Contact Cavium Networks for more information
  ***********************license end**************************************/
 
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
 /*
  * Functions for SPI initialization, configuration,
  * and monitoring.
@@ -41,6 +37,8 @@ void __cvmx_interrupt_stxx_int_msk_enable(int index);
 
 #include <asm/octeon/cvmx-pip-defs.h>
 #include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
 
 /*
  * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
index 19d54e02c18567cf7f6f494ea80623138e3ca5fc..2bb6912a580def2ab9007b741dbc44bbb0c209e2 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
 
 #include <asm/octeon/cvmx-pko-defs.h>
 #include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
 #include <asm/octeon/cvmx-pcsxx-defs.h>
 
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
 int __cvmx_helper_xaui_enumerate(int interface)
 {
        union cvmx_gmxx_hg2_control gmx_hg2_control;
index b3aec101a65d4ed96f4f986ea22c2210b297cdef..8272d8c648ca9fd4f218093212c89ad5245f4eb8 100644 (file)
@@ -814,7 +814,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
-                       enable_one = 0;
+                       enable_one = false;
                        __set_bit(cd->bit, pen);
                } else {
                        __clear_bit(cd->bit, pen);
index 8505db478904b10855eabf266f75ff9dd93e3699..807cadaf554e2e0e3d5665c95c8d2cb19ce1d705 100644 (file)
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
                return 0;
 
        pd = of_find_device_by_node(ehci_node);
+       of_node_put(ehci_node);
        if (!pd)
                return 0;
 
@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
                return 0;
 
        pd = of_find_device_by_node(ohci_node);
+       of_node_put(ohci_node);
        if (!pd)
                return 0;
 
@@ -1067,6 +1069,6 @@ end_led:
 
 static int __init octeon_publish_devices(void)
 {
-       return of_platform_bus_probe(NULL, octeon_ids, NULL);
+       return of_platform_populate(NULL, octeon_ids, NULL, NULL);
 }
 arch_initcall(octeon_publish_devices);
index a8034d0dcadeb5ce361bfd23dc737a6f48d6f5b4..c2426232db0646108ba8443f620872a2e5273cd1 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/mipsregs.h>
 #include <asm/bootinfo.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/time.h>
 
 #include <asm/octeon/octeon.h>
@@ -1108,7 +1109,7 @@ void __init plat_mem_setup(void)
  * Emit one character to the boot UART.         Exported for use by the
  * watchdog timer.
  */
-int prom_putchar(char c)
+void prom_putchar(char c)
 {
        uint64_t lsrval;
 
@@ -1119,7 +1120,6 @@ int prom_putchar(char c)
 
        /* Write the byte */
        cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
-       return 1;
 }
 EXPORT_SYMBOL(prom_putchar);
 
@@ -1154,11 +1154,7 @@ void __init prom_free_prom_memory(void)
 }
 
 void __init octeon_fill_mac_addresses(void);
-int octeon_prune_device_tree(void);
 
-extern const char __appended_dtb;
-extern const char __dtb_octeon_3xxx_begin;
-extern const char __dtb_octeon_68xx_begin;
 void __init device_tree_init(void)
 {
        const void *fdt;
index be23fd25eeaa5c89f0e22efc22ba85e22e3a93c4..030ff9c205fb42420f3b7b68959050c15b0a47a2 100644 (file)
@@ -92,6 +92,8 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 CONFIG_I2C_JZ4780=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_INGENIC=y
 # CONFIG_HWMON is not set
index 26b1cd5ffbf550d33b7ff1afe567b30bbfe265b5..684c9dcba12665ed4f7267f9f7bbc3894f2da4b8 100644 (file)
@@ -43,9 +43,6 @@ CONFIG_NETFILTER=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_SCSI=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_HW_RANDOM=y
 # CONFIG_HWMON is not set
index df8a9a15ca83a71ed80a792c7655334601902335..81058295d35f53905fe5ca3ae2c0dbe24c2a28d0 100644 (file)
@@ -317,6 +317,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 14df9ef15d408d7b6d9bd2868736c6b9852f4401..5c10cddc39d363352415555c559c9d23f117349b 100644 (file)
@@ -328,6 +328,7 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 25092e344574b8267822311596b8f1306204af19..bb694f5065f14e3afb4ac66ef073da33229311f1 100644 (file)
@@ -330,6 +330,7 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 210bf609f7858308548a6f437f69bef772462cc9..5b5306b80576579837a90f923148473cf8245e2a 100644 (file)
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index e5934aa98397189740359f5ecfe06d23a9e62890..85543599448f3650cc203b23ced7dbe6e3239a2f 100644 (file)
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index cb2ca11c17893e2048b0060d6a6fa25993a4d24d..067bb84ac916647db897b3c6db7ef9cf471bacdc 100644 (file)
@@ -134,6 +134,7 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index be29fcec69fcc70e325627bfa60f07e90f307c84..dfc78c3172a3be43c72e37352f5c0e9e62998474 100644 (file)
@@ -137,6 +137,7 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 40462d4c90a0159d563516664011c2bafa51f912..50a2288c69f8207e6b9925de8e309310e12d81e8 100644 (file)
@@ -132,6 +132,7 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 4e50176cb3df53e65c06fc00355b359ed0216a20..99a19cf5f9ba8af6feb873d862d14e2459d1db36 100644 (file)
@@ -326,6 +326,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
index 769d4b9ac82e1b14e2dde98ad0ce3b0b49621865..365e3913231ef0ed581710b95407685a304063a2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/fs.h>
+#include <asm/setup.h>
 #include <asm/sgialib.h>
 
 static void prom_console_write(struct console *co, const char *s,
index 7e8ba5ce95be0778c958d2772a2015e0450aaf85..be381307fbb0157d91527af37b507f3ba475442a 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <asm/sgialib.h>
 #include <asm/bcache.h>
+#include <asm/setup.h>
 
 /*
  * IP22 boardcache is not compatible with board caches.         Thus we disable it
index 6aa264b9856ac99b71b88d54fae19cea27d782de..8772617b64cefec0835523fe107a2febdaa778ab 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mipsprom.h>
 #include <asm/mipsregs.h>
 #include <asm/bootinfo.h>
+#include <asm/setup.h>
 
 /* special SNI prom calls */
 /*
index ba9b2c8cce6837dcb2bc5e84b07c6f5dcda6c297..08e33c6b2539c9bfd4225f84df2390467e601920 100644 (file)
@@ -35,13 +35,13 @@ config LEGACY_BOARD_OCELOT
        depends on LEGACY_BOARD_SEAD3=n
        select LEGACY_BOARDS
        select MSCC_OCELOT
+       select SYS_HAS_EARLY_PRINTK
+       select USE_GENERIC_EARLY_PRINTK_8250
 
 config MSCC_OCELOT
        bool
        select GPIOLIB
        select MSCC_OCELOT_IRQ
-       select SYS_HAS_EARLY_PRINTK
-       select USE_GENERIC_EARLY_PRINTK_8250
 
 comment "FIT/UHI Boards"
 
@@ -65,6 +65,14 @@ config FIT_IMAGE_FDT_XILFPGA
          Enable this to include the FDT for the MIPSfpga platform
          from Imagination Technologies in the FIT kernel image.
 
+config FIT_IMAGE_FDT_OCELOT_PCB123
+       bool "Include FDT for Microsemi Ocelot PCB123"
+       select MSCC_OCELOT
+       help
+         Enable this to include the FDT for the Ocelot PCB123 platform
+         from Microsemi in the FIT kernel image.
+         This requires u-boot on the platform.
+
 config VIRT_BOARD_RANCHU
        bool "Support Ranchu platform for Android emulator"
        help
index 0dd0d5d460a5fc7988b03f856f47563589d7e995..879cb80396c88e57b07fdd9e4474971c284148c2 100644 (file)
@@ -16,4 +16,5 @@ all-$(CONFIG_MIPS_GENERIC)    := vmlinux.gz.itb
 its-y                                  := vmlinux.its.S
 its-$(CONFIG_FIT_IMAGE_FDT_BOSTON)     += board-boston.its.S
 its-$(CONFIG_FIT_IMAGE_FDT_NI169445)   += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
 its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)    += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot_pcb123.its.S
new file mode 100644 (file)
index 0000000..5a7d5e1
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+       images {
+               fdt@ocelot_pcb123 {
+                       description = "MSCC Ocelot PCB123 Device Tree";
+                       data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+                       type = "flat_dt";
+                       arch = "mips";
+                       compression = "none";
+                       hash@0 {
+                               algo = "sha1";
+                       };
+               };
+       };
+
+       configurations {
+               conf@ocelot_pcb123 {
+                       description = "Ocelot Linux kernel";
+                       kernel = "kernel@0";
+                       fdt = "fdt@ocelot_pcb123";
+               };
+       };
+};
index 5ba6fcc26fa726aff50cc3a64315855eb935c1b0..a106f8113842a35e4c125d741a11992b63d95281 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/of_fdt.h>
-#include <linux/of_platform.h>
 
 #include <asm/bootinfo.h>
 #include <asm/fw/fw.h>
@@ -204,22 +203,11 @@ void __init arch_init_irq(void)
                                            "mti,cpu-interrupt-controller");
        if (!cpu_has_veic && !intc_node)
                mips_cpu_irq_init();
+       of_node_put(intc_node);
 
        irqchip_init();
 }
 
-static int __init publish_devices(void)
-{
-       if (!of_have_populated_dt())
-               panic("Device-tree not present");
-
-       if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
-               panic("Failed to populate DT");
-
-       return 0;
-}
-arch_initcall(publish_devices);
-
 void __init prom_free_prom_memory(void)
 {
 }
index b408dac722ac7fd7345fd0bf2e1e00f9b7dbd110..7ba4ad5cc1d668003f5824fa00746803a5a50524 100644 (file)
@@ -27,8 +27,6 @@ __init int yamon_dt_append_cmdline(void *fdt)
 
        /* find or add chosen node */
        chosen_off = fdt_path_offset(fdt, "/chosen");
-       if (chosen_off == -FDT_ERR_NOTFOUND)
-               chosen_off = fdt_path_offset(fdt, "/chosen@0");
        if (chosen_off == -FDT_ERR_NOTFOUND)
                chosen_off = fdt_add_subnode(fdt, 0, "chosen");
        if (chosen_off < 0) {
@@ -220,8 +218,6 @@ __init int yamon_dt_serial_config(void *fdt)
 
        /* find or add chosen node */
        chosen_off = fdt_path_offset(fdt, "/chosen");
-       if (chosen_off == -FDT_ERR_NOTFOUND)
-               chosen_off = fdt_path_offset(fdt, "/chosen@0");
        if (chosen_off == -FDT_ERR_NOTFOUND)
                chosen_off = fdt_add_subnode(fdt, 0, "chosen");
        if (chosen_off < 0) {
index 45d541baf359a9b057d74eafed0dc38dd9b1bf17..58351e48421e50cc2ec46dff5cbf33334620d0a6 100644 (file)
@@ -8,6 +8,7 @@ generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
index 0ab176bdb8e8107e3a8b700215ce9a96471b3ee9..0269b3de8b5197ab171ea9e4e4870ab4fe68dea9 100644 (file)
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
 
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
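
With __scbeqz in place, the previously duplicated ll/sc loops collapse into one body: the only difference between the R10000-workaround path and the plain path is whether the sc retry branch is beqzl (branch-likely) or beqz. Expanded by hand for ATOMIC_OP(add, +=, addu), roughly (a sketch, not a verbatim kernel excerpt; MIPS_ISA_LEVEL resolves to an ISA name such as mips32r2, and the kernel uses GCC_OFF_SMALL_ASM rather than the plain "R" constraint used here):

static inline void atomic_add_sketch(int i, int *counter)
{
	int temp;

	__asm__ __volatile__(
	"	.set	mips32r2			\n"
	"1:	ll	%0, %1		# load-linked	\n"
	"	addu	%0, %2		# temp += i	\n"
	"	sc	%0, %1		# store-cond.	\n"
	"	beqz	%0, 1b		# retry if sc failed\n"
	"	.set	mips0				\n"
	: "=&r" (temp), "+R" (*counter)
	: "Ir" (i));
}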
+
 #define ATOMIC_INIT(i)   { (i) }
 
 /*
 #define ATOMIC_OP(op, c_op, asm_op)                                          \
 static __inline__ void atomic_##op(int i, atomic_t * v)                              \
 {                                                                            \
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+       if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     ll      %0, %1          # atomic_" #op "        \n"   \
                "       " #asm_op " %0, %2                              \n"   \
                "       sc      %0, %1                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               int temp;                                                     \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       ll      %0, %1          # atomic_" #op "\n"   \
-                       "       " #asm_op " %0, %2                      \n"   \
-                       "       sc      %0, %1                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!temp));                                    \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -83,36 +81,20 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)           \
 {                                                                            \
        int result;                                                           \
                                                                              \
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+       if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     ll      %1, %2          # atomic_" #op "_return \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       sc      %0, %2                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               int temp;                                                     \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       ll      %1, %2  # atomic_" #op "_return \n"   \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       sc      %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!result));                                  \
-                                                                             \
-               result = temp; result c_op i;                                 \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -131,36 +113,20 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)          \
 {                                                                            \
        int result;                                                           \
                                                                              \
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+       if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     ll      %1, %2          # atomic_fetch_" #op "  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       sc      %0, %2                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       move    %0, %1                                  \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               int temp;                                                     \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       ll      %1, %2  # atomic_fetch_" #op "  \n"   \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       sc      %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!result));                                  \
-                                                                             \
-               result = temp;                                                \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -218,38 +184,17 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
        smp_mb__before_llsc();
 
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {
-               int temp;
-
-               __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
-               "1:     ll      %1, %2          # atomic_sub_if_positive\n"
-               "       subu    %0, %1, %3                              \n"
-               "       bltz    %0, 1f                                  \n"
-               "       sc      %0, %2                                  \n"
-               "       .set    noreorder                               \n"
-               "       beqzl   %0, 1b                                  \n"
-               "        subu   %0, %1, %3                              \n"
-               "       .set    reorder                                 \n"
-               "1:                                                     \n"
-               "       .set    mips0                                   \n"
-               : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF_SMALL_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
-               : "memory");
-       } else if (kernel_uses_llsc) {
+       if (kernel_uses_llsc) {
                int temp;
 
                __asm__ __volatile__(
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
+               "       move    %1, %0                                  \n"
                "       bltz    %0, 1f                                  \n"
-               "       sc      %0, %2                                  \n"
-               "       .set    noreorder                               \n"
-               "       beqz    %0, 1b                                  \n"
-               "        subu   %0, %1, %3                              \n"
-               "       .set    reorder                                 \n"
+               "       sc      %1, %2                                  \n"
+               "\t" __scbeqz " %1, 1b                                  \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
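Note the new "move %1, %0": sc overwrites its source register with the success flag, so the decremented value is first copied into temp and temp is the register stored, leaving result intact for both the bltz test and the return value; the old variant instead recomputed it with a subu in the branch delay slot. A C-level sketch of the semantics (illustrative only; a cmpxchg loop stands in for the single ll/sc sequence):

static inline int atomic_sub_if_positive_sketch(int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old - i;
		if (new < 0)	/* "bltz %0, 1f": skip the store */
			break;
	} while (atomic_cmpxchg(v, old, new) != old);

	return new;	/* result of the subtraction either way */
}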
@@ -274,97 +219,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-/*
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-/*
- * atomic_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
 /*
  * atomic_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
  */
 #define atomic_dec_if_positive(v)      atomic_sub_if_positive(1, v)
 
-/*
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1, (v))
-
-/*
- * atomic_dec - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1, (v))
-
-/*
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i)    { (i) }
@@ -386,31 +246,18 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 #define ATOMIC64_OP(op, c_op, asm_op)                                        \
 static __inline__ void atomic64_##op(long i, atomic64_t * v)                 \
 {                                                                            \
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+       if (kernel_uses_llsc) {                                               \
                long temp;                                                    \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     lld     %0, %1          # atomic64_" #op "      \n"   \
                "       " #asm_op " %0, %2                              \n"   \
                "       scd     %0, %1                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               long temp;                                                    \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       lld     %0, %1          # atomic64_" #op "\n" \
-                       "       " #asm_op " %0, %2                      \n"   \
-                       "       scd     %0, %1                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!temp));                                    \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -425,37 +272,20 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 {                                                                            \
        long result;                                                          \
                                                                              \
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+       if (kernel_uses_llsc) {                                               \
                long temp;                                                    \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     lld     %1, %2          # atomic64_" #op "_return\n"  \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       scd     %0, %2                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               long temp;                                                    \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       lld     %1, %2  # atomic64_" #op "_return\n"  \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       scd     %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF_SMALL_ASM() (v->counter)                \
-                       : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
-                       : "memory");                                          \
-               } while (unlikely(!result));                                  \
-                                                                             \
-               result = temp; result c_op i;                                 \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -478,33 +308,16 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
                long temp;                                                    \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    "MIPS_ISA_LEVEL"                        \n"   \
                "1:     lld     %1, %2          # atomic64_fetch_" #op "\n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       scd     %0, %2                                  \n"   \
-               "       beqzl   %0, 1b                                  \n"   \
+               "\t" __scbeqz " %0, 1b                                  \n"   \
                "       move    %0, %1                                  \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
-       } else if (kernel_uses_llsc) {                                        \
-               long temp;                                                    \
-                                                                             \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
-                       "       lld     %1, %2  # atomic64_fetch_" #op "\n"   \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       scd     %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF_SMALL_ASM() (v->counter)                \
-                       : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
-                       : "memory");                                          \
-               } while (unlikely(!result));                                  \
-                                                                             \
-               result = temp;                                                \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
@@ -563,38 +376,17 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 
        smp_mb__before_llsc();
 
-       if (kernel_uses_llsc && R10000_LLSC_WAR) {
-               long temp;
-
-               __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
-               "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
-               "       dsubu   %0, %1, %3                              \n"
-               "       bltz    %0, 1f                                  \n"
-               "       scd     %0, %2                                  \n"
-               "       .set    noreorder                               \n"
-               "       beqzl   %0, 1b                                  \n"
-               "        dsubu  %0, %1, %3                              \n"
-               "       .set    reorder                                 \n"
-               "1:                                                     \n"
-               "       .set    mips0                                   \n"
-               : "=&r" (result), "=&r" (temp),
-                 "=" GCC_OFF_SMALL_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
-               : "memory");
-       } else if (kernel_uses_llsc) {
+       if (kernel_uses_llsc) {
                long temp;
 
                __asm__ __volatile__(
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
+               "       move    %1, %0                                  \n"
                "       bltz    %0, 1f                                  \n"
-               "       scd     %0, %2                                  \n"
-               "       .set    noreorder                               \n"
-               "       beqz    %0, 1b                                  \n"
-               "        dsubu  %0, %1, %3                              \n"
-               "       .set    reorder                                 \n"
+               "       scd     %1, %2                                  \n"
+               "\t" __scbeqz " %1, 1b                                  \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
@@ -620,99 +412,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
 
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-/*
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-/*
- * atomic64_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
 /*
  * atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic64_t
  */
 #define atomic64_dec_if_positive(v)    atomic64_sub_if_positive(1, v)
 
-/*
- * atomic64_inc - increment atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic64_inc(v) atomic64_add(1, (v))
-
-/*
- * atomic64_dec - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-/*
- * atomic64_add_negative - add and test if negative
- * @v: pointer of type atomic64_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-
 #endif /* CONFIG_64BIT */
 
 #endif /* _ASM_ATOMIC_H */
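The removed one-liners (atomic_inc(), atomic_dec_and_test(), __atomic_add_unless() and friends) do not lose any functionality; this matches the generic fallbacks in <linux/atomic.h>, which synthesize them from the operations the architecture does provide. Roughly, the generic shape is (a sketch, not this header's code):

#ifndef atomic_inc
static inline void atomic_inc(atomic_t *v)
{
	atomic_add(1, v);
}
#endif

#ifndef atomic_dec_and_test
static inline bool atomic_dec_and_test(atomic_t *v)
{
	return atomic_sub_return(1, v) == 0;
}
#endif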
index b3e2975f83d36e021a57988672ef26c09701f879..bf6a8afd7ad2783dff63cb55954a2e34d8ca6e79 100644 (file)
@@ -123,22 +123,6 @@ static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
        barrier();
 }
 
-static inline void bmips_post_dma_flush(struct device *dev)
-{
-       void __iomem *cbr = BMIPS_GET_CBR();
-       u32 cfg;
-
-       if (boot_cpu_type() != CPU_BMIPS3300 &&
-           boot_cpu_type() != CPU_BMIPS4350 &&
-           boot_cpu_type() != CPU_BMIPS4380)
-               return;
-
-       /* Flush stale data out of the readahead cache */
-       cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-       __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
-       __raw_readl(cbr + BMIPS_RAC_CONFIG);
-}
-
 #endif /* !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_BMIPS_H */
index 9cdb4e4ce258390d75325ea0dba6562f13332924..0edba3e757471b74d441fa520c3ab44084c5f630 100644 (file)
 #include <asm/isa-rev.h>
 #include <cpu-feature-overrides.h>
 
+#define __ase(ase)                     (cpu_data[0].ases & (ase))
+#define __opt(opt)                     (cpu_data[0].options & (opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * Note that these should only be used in cases where a kernel built for an
+ * older ISA *cannot* run on a CPU which supports the feature in question. For
+ * example this may be used for features introduced with MIPSr6, since a kernel
+ * built for an older ISA cannot run on a MIPSr6 CPU. This should not be used
+ * for MIPSr2 features however, since a MIPSr1 or earlier kernel might run on a
+ * MIPSr2 CPU.
+ */
+#define __isa_ge_and_ase(isa, ase)     ((MIPS_ISA_REV >= (isa)) && __ase(ase))
+#define __isa_ge_and_opt(isa, opt)     ((MIPS_ISA_REV >= (isa)) && __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *or* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & then become required.
+ */
+#define __isa_ge_or_ase(isa, ase)      ((MIPS_ISA_REV >= (isa)) || __ase(ase))
+#define __isa_ge_or_opt(isa, opt)      ((MIPS_ISA_REV >= (isa)) || __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is < isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & are then removed - i.e. no longer present in any CPU implementing
+ * the given ISA revision.
+ */
+#define __isa_lt_and_ase(isa, ase)     ((MIPS_ISA_REV < (isa)) && __ase(ase))
+#define __isa_lt_and_opt(isa, opt)     ((MIPS_ISA_REV < (isa)) && __opt(opt))
+
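Because MIPS_ISA_REV is a compile-time constant, these helpers also let the compiler discard whole feature branches. For example (the handler name is hypothetical):

	/*
	 * cpu_has_mips16 becomes __isa_lt_and_ase(6, MIPS_ASE_MIPS16):
	 * on a kernel built for MIPSr6, (MIPS_ISA_REV < 6) is constant
	 * false, so this block is eliminated at compile time; on older
	 * kernels it falls back to the runtime ASE probe.
	 */
	if (cpu_has_mips16)
		handle_mips16_exception();	/* hypothetical handler */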
 /*
  * SMP assumption: Options of CPU 0 are a superset of all processors.
  * This is true for all known MIPS systems.
  */
 #ifndef cpu_has_tlb
-#define cpu_has_tlb            (cpu_data[0].options & MIPS_CPU_TLB)
+#define cpu_has_tlb            __opt(MIPS_CPU_TLB)
 #endif
 #ifndef cpu_has_ftlb
-#define cpu_has_ftlb           (cpu_data[0].options & MIPS_CPU_FTLB)
+#define cpu_has_ftlb           __opt(MIPS_CPU_FTLB)
 #endif
 #ifndef cpu_has_tlbinv
-#define cpu_has_tlbinv         (cpu_data[0].options & MIPS_CPU_TLBINV)
+#define cpu_has_tlbinv         __opt(MIPS_CPU_TLBINV)
 #endif
 #ifndef cpu_has_segments
-#define cpu_has_segments       (cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#define cpu_has_segments       __opt(MIPS_CPU_SEGMENTS)
 #endif
 #ifndef cpu_has_eva
-#define cpu_has_eva            (cpu_data[0].options & MIPS_CPU_EVA)
+#define cpu_has_eva            __opt(MIPS_CPU_EVA)
 #endif
 #ifndef cpu_has_htw
-#define cpu_has_htw            (cpu_data[0].options & MIPS_CPU_HTW)
+#define cpu_has_htw            __opt(MIPS_CPU_HTW)
 #endif
 #ifndef cpu_has_ldpte
-#define cpu_has_ldpte          (cpu_data[0].options & MIPS_CPU_LDPTE)
+#define cpu_has_ldpte          __opt(MIPS_CPU_LDPTE)
 #endif
 #ifndef cpu_has_rixiex
-#define cpu_has_rixiex         (cpu_data[0].options & MIPS_CPU_RIXIEX)
+#define cpu_has_rixiex         __isa_ge_or_opt(6, MIPS_CPU_RIXIEX)
 #endif
 #ifndef cpu_has_maar
-#define cpu_has_maar           (cpu_data[0].options & MIPS_CPU_MAAR)
+#define cpu_has_maar           __opt(MIPS_CPU_MAAR)
 #endif
 #ifndef cpu_has_rw_llb
-#define cpu_has_rw_llb         (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#define cpu_has_rw_llb         __isa_ge_or_opt(6, MIPS_CPU_RW_LLB)
 #endif
 
 /*
 #define cpu_has_3kex           (!cpu_has_4kex)
 #endif
 #ifndef cpu_has_4kex
-#define cpu_has_4kex           (cpu_data[0].options & MIPS_CPU_4KEX)
+#define cpu_has_4kex           __isa_ge_or_opt(1, MIPS_CPU_4KEX)
 #endif
 #ifndef cpu_has_3k_cache
-#define cpu_has_3k_cache       (cpu_data[0].options & MIPS_CPU_3K_CACHE)
+#define cpu_has_3k_cache       __isa_lt_and_opt(1, MIPS_CPU_3K_CACHE)
 #endif
 #define cpu_has_6k_cache       0
 #define cpu_has_8k_cache       0
 #ifndef cpu_has_4k_cache
-#define cpu_has_4k_cache       (cpu_data[0].options & MIPS_CPU_4K_CACHE)
+#define cpu_has_4k_cache       __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
 #endif
 #ifndef cpu_has_tx39_cache
-#define cpu_has_tx39_cache     (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
+#define cpu_has_tx39_cache     __opt(MIPS_CPU_TX39_CACHE)
 #endif
 #ifndef cpu_has_octeon_cache
 #define cpu_has_octeon_cache   0
 #define raw_cpu_has_fpu                cpu_has_fpu
 #endif
 #ifndef cpu_has_32fpr
-#define cpu_has_32fpr          (cpu_data[0].options & MIPS_CPU_32FPR)
+#define cpu_has_32fpr          __isa_ge_or_opt(1, MIPS_CPU_32FPR)
 #endif
 #ifndef cpu_has_counter
-#define cpu_has_counter                (cpu_data[0].options & MIPS_CPU_COUNTER)
+#define cpu_has_counter                __opt(MIPS_CPU_COUNTER)
 #endif
 #ifndef cpu_has_watch
-#define cpu_has_watch          (cpu_data[0].options & MIPS_CPU_WATCH)
+#define cpu_has_watch          __opt(MIPS_CPU_WATCH)
 #endif
 #ifndef cpu_has_divec
-#define cpu_has_divec          (cpu_data[0].options & MIPS_CPU_DIVEC)
+#define cpu_has_divec          __isa_ge_or_opt(1, MIPS_CPU_DIVEC)
 #endif
 #ifndef cpu_has_vce
-#define cpu_has_vce            (cpu_data[0].options & MIPS_CPU_VCE)
+#define cpu_has_vce            __opt(MIPS_CPU_VCE)
 #endif
 #ifndef cpu_has_cache_cdex_p
-#define cpu_has_cache_cdex_p   (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P)
+#define cpu_has_cache_cdex_p   __opt(MIPS_CPU_CACHE_CDEX_P)
 #endif
 #ifndef cpu_has_cache_cdex_s
-#define cpu_has_cache_cdex_s   (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
+#define cpu_has_cache_cdex_s   __opt(MIPS_CPU_CACHE_CDEX_S)
 #endif
 #ifndef cpu_has_prefetch
-#define cpu_has_prefetch       (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#define cpu_has_prefetch       __isa_ge_or_opt(1, MIPS_CPU_PREFETCH)
 #endif
 #ifndef cpu_has_mcheck
-#define cpu_has_mcheck         (cpu_data[0].options & MIPS_CPU_MCHECK)
+#define cpu_has_mcheck         __isa_ge_or_opt(1, MIPS_CPU_MCHECK)
 #endif
 #ifndef cpu_has_ejtag
-#define cpu_has_ejtag          (cpu_data[0].options & MIPS_CPU_EJTAG)
+#define cpu_has_ejtag          __opt(MIPS_CPU_EJTAG)
 #endif
 #ifndef cpu_has_llsc
-#define cpu_has_llsc           (cpu_data[0].options & MIPS_CPU_LLSC)
+#define cpu_has_llsc           __isa_ge_or_opt(1, MIPS_CPU_LLSC)
 #endif
 #ifndef cpu_has_bp_ghist
-#define cpu_has_bp_ghist       (cpu_data[0].options & MIPS_CPU_BP_GHIST)
+#define cpu_has_bp_ghist       __opt(MIPS_CPU_BP_GHIST)
 #endif
 #ifndef kernel_uses_llsc
 #define kernel_uses_llsc       cpu_has_llsc
 #endif
 #ifndef cpu_has_guestctl0ext
-#define cpu_has_guestctl0ext   (cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#define cpu_has_guestctl0ext   __opt(MIPS_CPU_GUESTCTL0EXT)
 #endif
 #ifndef cpu_has_guestctl1
-#define cpu_has_guestctl1      (cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#define cpu_has_guestctl1      __opt(MIPS_CPU_GUESTCTL1)
 #endif
 #ifndef cpu_has_guestctl2
-#define cpu_has_guestctl2      (cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#define cpu_has_guestctl2      __opt(MIPS_CPU_GUESTCTL2)
 #endif
 #ifndef cpu_has_guestid
-#define cpu_has_guestid                (cpu_data[0].options & MIPS_CPU_GUESTID)
+#define cpu_has_guestid                __opt(MIPS_CPU_GUESTID)
 #endif
 #ifndef cpu_has_drg
-#define cpu_has_drg            (cpu_data[0].options & MIPS_CPU_DRG)
+#define cpu_has_drg            __opt(MIPS_CPU_DRG)
 #endif
 #ifndef cpu_has_mips16
-#define cpu_has_mips16         (cpu_data[0].ases & MIPS_ASE_MIPS16)
+#define cpu_has_mips16         __isa_lt_and_ase(6, MIPS_ASE_MIPS16)
 #endif
 #ifndef cpu_has_mips16e2
-#define cpu_has_mips16e2       (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#define cpu_has_mips16e2       __isa_lt_and_ase(6, MIPS_ASE_MIPS16E2)
 #endif
 #ifndef cpu_has_mdmx
-#define cpu_has_mdmx           (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx           __isa_lt_and_ase(6, MIPS_ASE_MDMX)
 #endif
 #ifndef cpu_has_mips3d
-#define cpu_has_mips3d         (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d         __isa_lt_and_ase(6, MIPS_ASE_MIPS3D)
 #endif
 #ifndef cpu_has_smartmips
-#define cpu_has_smartmips      (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
+#define cpu_has_smartmips      __isa_lt_and_ase(6, MIPS_ASE_SMARTMIPS)
 #endif
 
 #ifndef cpu_has_rixi
-#define cpu_has_rixi           (cpu_data[0].options & MIPS_CPU_RIXI)
+#define cpu_has_rixi           __isa_ge_or_opt(6, MIPS_CPU_RIXI)
 #endif
 
 #ifndef cpu_has_mmips
 # ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
-#  define cpu_has_mmips                (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+#  define cpu_has_mmips                __opt(MIPS_CPU_MICROMIPS)
 # else
 #  define cpu_has_mmips                0
 # endif
 #endif
 
 #ifndef cpu_has_lpa
-#define cpu_has_lpa            (cpu_data[0].options & MIPS_CPU_LPA)
+#define cpu_has_lpa            __opt(MIPS_CPU_LPA)
 #endif
 #ifndef cpu_has_mvh
-#define cpu_has_mvh            (cpu_data[0].options & MIPS_CPU_MVH)
+#define cpu_has_mvh            __opt(MIPS_CPU_MVH)
 #endif
 #ifndef cpu_has_xpa
 #define cpu_has_xpa            (cpu_has_lpa && cpu_has_mvh)
 #endif
 
 #ifndef cpu_has_dsp
-#define cpu_has_dsp            (cpu_data[0].ases & MIPS_ASE_DSP)
+#define cpu_has_dsp            __ase(MIPS_ASE_DSP)
 #endif
 
 #ifndef cpu_has_dsp2
-#define cpu_has_dsp2           (cpu_data[0].ases & MIPS_ASE_DSP2P)
+#define cpu_has_dsp2           __ase(MIPS_ASE_DSP2P)
 #endif
 
 #ifndef cpu_has_dsp3
-#define cpu_has_dsp3           (cpu_data[0].ases & MIPS_ASE_DSP3)
+#define cpu_has_dsp3           __ase(MIPS_ASE_DSP3)
 #endif
 
 #ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt         (cpu_data[0].ases & MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt         __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
 #endif
 
 #ifndef cpu_has_vp
-#define cpu_has_vp             (cpu_data[0].options & MIPS_CPU_VP)
+#define cpu_has_vp             __isa_ge_and_opt(6, MIPS_CPU_VP)
 #endif
 
 #ifndef cpu_has_userlocal
-#define cpu_has_userlocal      (cpu_data[0].options & MIPS_CPU_ULRI)
+#define cpu_has_userlocal      __isa_ge_or_opt(6, MIPS_CPU_ULRI)
 #endif
 
 #ifdef CONFIG_32BIT
 # ifndef cpu_has_nofpuex
-# define cpu_has_nofpuex       (cpu_data[0].options & MIPS_CPU_NOFPUEX)
+# define cpu_has_nofpuex       __isa_lt_and_opt(1, MIPS_CPU_NOFPUEX)
 # endif
 # ifndef cpu_has_64bits
 # define cpu_has_64bits                (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
 #endif
 
 #if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
-# define cpu_has_vint          (cpu_data[0].options & MIPS_CPU_VINT)
+# define cpu_has_vint          __opt(MIPS_CPU_VINT)
 #elif !defined(cpu_has_vint)
 # define cpu_has_vint                  0
 #endif
 
 #if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
-# define cpu_has_veic          (cpu_data[0].options & MIPS_CPU_VEIC)
+# define cpu_has_veic          __opt(MIPS_CPU_VEIC)
 #elif !defined(cpu_has_veic)
 # define cpu_has_veic                  0
 #endif
 
 #ifndef cpu_has_inclusive_pcaches
-#define cpu_has_inclusive_pcaches      (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
+#define cpu_has_inclusive_pcaches      __opt(MIPS_CPU_INCLUSIVE_CACHES)
 #endif
 
 #ifndef cpu_dcache_line_size
 #endif
 
 #ifndef cpu_has_perf_cntr_intr_bit
-#define cpu_has_perf_cntr_intr_bit     (cpu_data[0].options & MIPS_CPU_PCI)
+#define cpu_has_perf_cntr_intr_bit     __opt(MIPS_CPU_PCI)
 #endif
 
 #ifndef cpu_has_vz
-#define cpu_has_vz             (cpu_data[0].ases & MIPS_ASE_VZ)
+#define cpu_has_vz             __ase(MIPS_ASE_VZ)
 #endif
 
 #if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
-# define cpu_has_msa           (cpu_data[0].ases & MIPS_ASE_MSA)
+# define cpu_has_msa           __ase(MIPS_ASE_MSA)
 #elif !defined(cpu_has_msa)
 # define cpu_has_msa           0
 #endif
 
 #ifndef cpu_has_ufr
-# define cpu_has_ufr           (cpu_data[0].options & MIPS_CPU_UFR)
+# define cpu_has_ufr           __opt(MIPS_CPU_UFR)
 #endif
 
 #ifndef cpu_has_fre
-# define cpu_has_fre           (cpu_data[0].options & MIPS_CPU_FRE)
+# define cpu_has_fre           __opt(MIPS_CPU_FRE)
 #endif
 
 #ifndef cpu_has_cdmm
-# define cpu_has_cdmm          (cpu_data[0].options & MIPS_CPU_CDMM)
+# define cpu_has_cdmm          __opt(MIPS_CPU_CDMM)
 #endif
 
 #ifndef cpu_has_small_pages
-# define cpu_has_small_pages   (cpu_data[0].options & MIPS_CPU_SP)
+# define cpu_has_small_pages   __opt(MIPS_CPU_SP)
 #endif
 
 #ifndef cpu_has_nan_legacy
-#define cpu_has_nan_legacy     (cpu_data[0].options & MIPS_CPU_NAN_LEGACY)
+#define cpu_has_nan_legacy     __isa_lt_and_opt(6, MIPS_CPU_NAN_LEGACY)
 #endif
 #ifndef cpu_has_nan_2008
-#define cpu_has_nan_2008       (cpu_data[0].options & MIPS_CPU_NAN_2008)
+#define cpu_has_nan_2008       __isa_ge_or_opt(6, MIPS_CPU_NAN_2008)
 #endif
 
 #ifndef cpu_has_ebase_wg
-# define cpu_has_ebase_wg      (cpu_data[0].options & MIPS_CPU_EBASE_WG)
+# define cpu_has_ebase_wg      __opt(MIPS_CPU_EBASE_WG)
 #endif
 
 #ifndef cpu_has_badinstr
-# define cpu_has_badinstr      (cpu_data[0].options & MIPS_CPU_BADINSTR)
+# define cpu_has_badinstr      __isa_ge_or_opt(6, MIPS_CPU_BADINSTR)
 #endif
 
 #ifndef cpu_has_badinstrp
-# define cpu_has_badinstrp     (cpu_data[0].options & MIPS_CPU_BADINSTRP)
+# define cpu_has_badinstrp     __isa_ge_or_opt(6, MIPS_CPU_BADINSTRP)
 #endif
 
 #ifndef cpu_has_contextconfig
-# define cpu_has_contextconfig (cpu_data[0].options & MIPS_CPU_CTXTC)
+# define cpu_has_contextconfig __opt(MIPS_CPU_CTXTC)
 #endif
 
 #ifndef cpu_has_perf
-# define cpu_has_perf          (cpu_data[0].options & MIPS_CPU_PERF)
+# define cpu_has_perf          __opt(MIPS_CPU_PERF)
 #endif
 
-#if defined(CONFIG_SMP) && (MIPS_ISA_REV >= 6)
+#ifdef CONFIG_SMP
 /*
  * Some systems share FTLB RAMs between threads within a core (siblings in
  * kernel parlance). This means that FTLB entries may become invalid at almost
  */
 # ifndef cpu_has_shared_ftlb_ram
 #  define cpu_has_shared_ftlb_ram \
-       (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+       __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_RAM)
 # endif
 
 /*
  */
 # ifndef cpu_has_shared_ftlb_entries
 #  define cpu_has_shared_ftlb_entries \
-       (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+       __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_ENTRIES)
 # endif
-#endif /* SMP && MIPS_ISA_REV >= 6 */
+#endif /* SMP */
 
 #ifndef cpu_has_shared_ftlb_ram
 # define cpu_has_shared_ftlb_ram 0
 
 #ifdef CONFIG_MIPS_MT_SMP
 # define cpu_has_mipsmt_pertccounters \
-       (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
+       __isa_lt_and_opt(6, MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
 #else
 # define cpu_has_mipsmt_pertccounters 0
 #endif /* CONFIG_MIPS_MT_SMP */
index 5b9d02ef4f60375f9bf4370e12cd2e566890159b..dacbdb84516a09134896edd704d129b4f5f516f7 100644 (file)
  * Definitions for 7:0 on legacy processors
  */
 
-#define PRID_REV_TX4927                0x0022
-#define PRID_REV_TX4937                0x0030
-#define PRID_REV_R4400         0x0040
-#define PRID_REV_R3000A                0x0030
-#define PRID_REV_R3000         0x0020
-#define PRID_REV_R2000A                0x0010
-#define PRID_REV_TX3912                0x0010
-#define PRID_REV_TX3922                0x0030
-#define PRID_REV_TX3927                0x0040
-#define PRID_REV_VR4111                0x0050
-#define PRID_REV_VR4181                0x0050  /* Same as VR4111 */
-#define PRID_REV_VR4121                0x0060
-#define PRID_REV_VR4122                0x0070
-#define PRID_REV_VR4181A       0x0070  /* Same as VR4122 */
-#define PRID_REV_VR4130                0x0080
-#define PRID_REV_34K_V1_0_2    0x0022
-#define PRID_REV_LOONGSON1B    0x0020
-#define PRID_REV_LOONGSON1C    0x0020  /* Same as Loongson-1B */
-#define PRID_REV_LOONGSON2E    0x0002
-#define PRID_REV_LOONGSON2F    0x0003
-#define PRID_REV_LOONGSON3A_R1 0x0005
-#define PRID_REV_LOONGSON3B_R1 0x0006
-#define PRID_REV_LOONGSON3B_R2 0x0007
-#define PRID_REV_LOONGSON3A_R2 0x0008
-#define PRID_REV_LOONGSON3A_R3 0x0009
+#define PRID_REV_TX4927                        0x0022
+#define PRID_REV_TX4937                        0x0030
+#define PRID_REV_R4400                 0x0040
+#define PRID_REV_R3000A                        0x0030
+#define PRID_REV_R3000                 0x0020
+#define PRID_REV_R2000A                        0x0010
+#define PRID_REV_TX3912                        0x0010
+#define PRID_REV_TX3922                        0x0030
+#define PRID_REV_TX3927                        0x0040
+#define PRID_REV_VR4111                        0x0050
+#define PRID_REV_VR4181                        0x0050  /* Same as VR4111 */
+#define PRID_REV_VR4121                        0x0060
+#define PRID_REV_VR4122                        0x0070
+#define PRID_REV_VR4181A               0x0070  /* Same as VR4122 */
+#define PRID_REV_VR4130                        0x0080
+#define PRID_REV_34K_V1_0_2            0x0022
+#define PRID_REV_LOONGSON1B            0x0020
+#define PRID_REV_LOONGSON1C            0x0020  /* Same as Loongson-1B */
+#define PRID_REV_LOONGSON2E            0x0002
+#define PRID_REV_LOONGSON2F            0x0003
+#define PRID_REV_LOONGSON3A_R1         0x0005
+#define PRID_REV_LOONGSON3B_R1         0x0006
+#define PRID_REV_LOONGSON3B_R2         0x0007
+#define PRID_REV_LOONGSON3A_R2         0x0008
+#define PRID_REV_LOONGSON3A_R3_0       0x0009
+#define PRID_REV_LOONGSON3A_R3_1       0x000d
 
 /*
  * Older processors used to encode processor version and revision in two
index 72d0eab02afcbbfc33bd6ee653389a30887c1f3a..8eda48748ed59cc9d2e57bc19acb41ebacb853ae 100644 (file)
@@ -21,10 +21,10 @@ enum coherent_io_user_state {
 extern enum coherent_io_user_state coherentio;
 extern int hw_coherentio;
 #else
-#ifdef CONFIG_DMA_COHERENT
-#define coherentio     IO_COHERENCE_ENABLED
-#else
+#ifdef CONFIG_DMA_NONCOHERENT
 #define coherentio     IO_COHERENCE_DISABLED
+#else
+#define coherentio     IO_COHERENCE_ENABLED
 #endif
 #define hw_coherentio  0
 #endif /* CONFIG_DMA_MAYBE_COHERENT */
index f32f15530aba4c2b0bce3b5feb2b0a91b0fadabb..b5c240806e1bb72b4b1b35d2d8ed13ac3e562f64 100644 (file)
@@ -1 +1,16 @@
-#include <asm/dma-coherence.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_DMA_DIRECT_H
+#define _MIPS_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+       if (!dev->dma_mask)
+               return false;
+
+       return addr + size - 1 <= *dev->dma_mask;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _MIPS_DMA_DIRECT_H */
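dma_capable() is the gate the direct-mapping code applies before handing a bus address to a device. A minimal illustration, with values chosen to overflow a 32-bit mask:

	/* dev->dma_mask == DMA_BIT_MASK(32), i.e. 0xffffffff */
	dma_addr_t addr = 0xfffff000;
	size_t size = 0x2000;

	/* addr + size - 1 == 0x100000fff exceeds the mask: */
	if (!dma_capable(dev, addr, size))
		return -EIO;	/* caller must bounce, e.g. via swiotlb */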
index 886e75a383f27c35d2bc85e9a42ad03106d1d3f8..e81c4e97ff1a28f47ef337069b22518bdce933ca 100644 (file)
@@ -2,19 +2,21 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
-#include <linux/scatterlist.h>
-#include <asm/dma-coherence.h>
-#include <asm/cache.h>
+#include <linux/swiotlb.h>
 
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
-#include <dma-coherence.h>
-#endif
-
-extern const struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops jazz_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return mips_dma_map_ops;
+#if defined(CONFIG_MACH_JAZZ)
+       return &jazz_dma_ops;
+#elif defined(CONFIG_SWIOTLB)
+       return &swiotlb_dma_ops;
+#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
+       return &dma_noncoherent_ops;
+#else
+       return &dma_direct_ops;
+#endif
 }
 
 #define arch_setup_dma_ops arch_setup_dma_ops
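The selection is now purely a matter of kernel configuration rather than a boot-time assignment through mips_dma_map_ops; for instance, a kernel built with CONFIG_DMA_NONCOHERENT_OPS (and neither MACH_JAZZ nor SWIOTLB) resolves to:

	const struct dma_map_ops *ops = get_arch_dma_ops(NULL);
	/* ops == &dma_noncoherent_ops, folded at compile time */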
index a7d0b836f2f7dd9c8bf7897759aed6b9f59ade39..54c730aed32718e828d204b73e9747a18db87713 100644 (file)
@@ -12,6 +12,8 @@
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
 
+#define ARCH_HAS_IOREMAP_WC
+
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -141,14 +143,14 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
 {
-       return (unsigned long)address - PAGE_OFFSET;
+       return virt_to_phys(address);
 }
 
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
 {
-       return (void *)(address + PAGE_OFFSET);
+       return phys_to_virt(address);
 }
 
 #define isa_page_to_bus page_to_phys
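On a classic layout with PHYS_OFFSET == 0 this rewrite is a no-op, since virt_to_phys() is then exactly the old PAGE_OFFSET subtraction; presumably the point is platforms with a nonzero PHYS_OFFSET, where the open-coded arithmetic yielded the wrong bus address. A sketch of the equivalence on the traditional layout:

	void *p = phys_to_virt(0x100000);

	/* Holds only when PHYS_OFFSET == 0: */
	BUG_ON(isa_virt_to_bus(p) != (unsigned long)p - PAGE_OFFSET);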
@@ -278,15 +280,25 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
 #define ioremap_cache ioremap_cachable
 
 /*
- * These two are MIPS specific ioremap variant.         ioremap_cacheable_cow
- * requests a cachable mapping, ioremap_uncached_accelerated requests a
- * mapping using the uncached accelerated mode which isn't supported on
- * all processors.
+ * ioremap_wc     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communication performance. If it is determined at boot (see
+ * cpu_probe()) that the CPU's CCA does not support UCA, this method
+ * falls back to the _CACHE_UNCACHED option.
  */
-#define ioremap_cacheable_cow(offset, size)                            \
-       __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
-#define ioremap_uncached_accelerated(offset, size)                     \
-       __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
+#define ioremap_wc(offset, size)                                       \
+       __ioremap_mode((offset), (size), boot_cpu_data.writecombine)
 
 static inline void iounmap(const volatile void __iomem *addr)
 {
@@ -414,6 +426,8 @@ static inline type pfx##in##bwlq##p(unsigned long port)                     \
        __val = *__addr;                                                \
        slow;                                                           \
                                                                        \
+       /* prevent prefetching of coherent DMA data prematurely */      \
+       rmb();                                                          \
        return pfx##ioswab##bwlq(__addr, __val);                        \
 }
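The barrier closes an ordering hole in the port-I/O read path: without it a CPU could speculate a load from a coherent DMA buffer ahead of the in*() access that reported the DMA complete. A sketch of the driver pattern being protected (port and flag names hypothetical):

	u8 status = inb(DEV_STATUS_PORT);	/* hypothetical port */

	if (status & DEV_DMA_DONE)		/* hypothetical flag */
		consume(dma_buf->data);	/* load must see DMA'd data */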
 
@@ -588,7 +602,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
  *
  * This API used to be exported; it now is for arch code internal use only.
  */
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
 
 extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
 extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -607,7 +621,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 #define dma_cache_inv(start,size)      \
        do { (void) (start); (void) (size); } while (0)
 
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
 
 /*
  * Read a 32-bit register that requires a 64-bit read cycle on the bus.
index ad1a99948f2795c0608c32e3dc213c46e171d162..a72dfbf1babb10da565d425330fbcf515344d526 100644 (file)
@@ -68,16 +68,6 @@ struct prev_kprobe {
        unsigned long saved_epc;
 };
 
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
-       (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR)                                   \
-       ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)   \
-               ? MAX_JPROBES_STACK_ADDR - (ADDR)                       \
-               : MAX_JPROBES_STACK_SIZE)
-
-
 #define SKIP_DELAYSLOT 0x0001
 
 /* per-cpu kprobe control block */
@@ -86,12 +76,9 @@ struct kprobe_ctlblk {
        unsigned long kprobe_old_SR;
        unsigned long kprobe_saved_SR;
        unsigned long kprobe_saved_epc;
-       unsigned long jprobe_saved_sp;
-       struct pt_regs jprobe_saved_regs;
        /* Per-thread fields, used while emulating branches */
        unsigned long flags;
        unsigned long target_epc;
-       u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index 660ab64c0fc9936030c3cfc399016a1397edf26e..a004d94dfbddcb306f5a03b3b2c3926abe99a840 100644 (file)
@@ -17,9 +17,6 @@
 #define PAGE_OFFSET    _AC(0x94000000, UL)
 #define PHYS_OFFSET    _AC(0x14000000, UL)
 
-#define UNCAC_BASE     _AC(0xb4000000, UL)     /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE                UNCAC_BASE
-
 #include <asm/mach-generic/spaces.h>
 
 #endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
deleted file mode 100644 (file)
index d5defdd..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007  Felix Fietkau <nbd@openwrt.org>
- *
- */
-#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
-#define __ASM_MACH_ATH25_DMA_COHERENCE_H
-
-#include <linux/device.h>
-
-/*
- * We need some arbitrary non-zero value to be programmed to the BAR1 register
- * of PCI host controller to enable DMA. The same value should be used as the
- * offset to calculate the physical address of DMA buffer for PCI devices.
- */
-#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
-
-static inline dma_addr_t ath25_dev_offset(struct device *dev)
-{
-#ifdef CONFIG_PCI
-       extern struct bus_type pci_bus_type;
-
-       if (dev && dev->bus == &pci_bus_type)
-               return AR2315_PCI_HOST_SDRAM_BASEADDR;
-#endif
-       return 0;
-}
-
-static inline dma_addr_t
-plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
-       return virt_to_phys(addr) + ath25_dev_offset(dev);
-}
-
-static inline dma_addr_t
-plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
-       return page_to_phys(page) + ath25_dev_offset(dev);
-}
-
-static inline unsigned long
-plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
-       return dma_addr - ath25_dev_offset(dev);
-}
-
-static inline void
-plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
-                  enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_COHERENT
-       return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
-       return 0;
-#endif
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
index d99ca862dae32babbe68d039572a8e0f455ef5f0..284b4fa23e0399b9c8a8833633b5b75c5bbfbb30 100644 (file)
 #include <linux/bitops.h>
 
 #define AR71XX_APB_BASE                0x18000000
+#define AR71XX_GE0_BASE                0x19000000
+#define AR71XX_GE0_SIZE                0x10000
+#define AR71XX_GE1_BASE                0x1a000000
+#define AR71XX_GE1_SIZE                0x10000
 #define AR71XX_EHCI_BASE       0x1b000000
 #define AR71XX_EHCI_SIZE       0x1000
 #define AR71XX_OHCI_BASE       0x1c000000
@@ -39,6 +43,8 @@
 #define AR71XX_PLL_SIZE                0x100
 #define AR71XX_RESET_BASE      (AR71XX_APB_BASE + 0x00060000)
 #define AR71XX_RESET_SIZE      0x100
+#define AR71XX_MII_BASE                (AR71XX_APB_BASE + 0x00070000)
+#define AR71XX_MII_SIZE                0x100
 
 #define AR71XX_PCI_MEM_BASE    0x10000000
 #define AR71XX_PCI_MEM_SIZE    0x07000000
 
 #define AR933X_UART_BASE       (AR71XX_APB_BASE + 0x00020000)
 #define AR933X_UART_SIZE       0x14
+#define AR933X_GMAC_BASE       (AR71XX_APB_BASE + 0x00070000)
+#define AR933X_GMAC_SIZE       0x04
 #define AR933X_WMAC_BASE       (AR71XX_APB_BASE + 0x00100000)
 #define AR933X_WMAC_SIZE       0x20000
 #define AR933X_EHCI_BASE       0x1b000000
 #define AR933X_EHCI_SIZE       0x1000
 
+#define AR934X_GMAC_BASE       (AR71XX_APB_BASE + 0x00070000)
+#define AR934X_GMAC_SIZE       0x14
 #define AR934X_WMAC_BASE       (AR71XX_APB_BASE + 0x00100000)
 #define AR934X_WMAC_SIZE       0x20000
 #define AR934X_EHCI_BASE       0x1b000000
 #define AR934X_EHCI_SIZE       0x200
+#define AR934X_NFC_BASE                0x1b000200
+#define AR934X_NFC_SIZE                0xb8
 #define AR934X_SRIF_BASE       (AR71XX_APB_BASE + 0x00116000)
 #define AR934X_SRIF_SIZE       0x1000
 
+#define QCA953X_GMAC_BASE      (AR71XX_APB_BASE + 0x00070000)
+#define QCA953X_GMAC_SIZE      0x14
+#define QCA953X_WMAC_BASE      (AR71XX_APB_BASE + 0x00100000)
+#define QCA953X_WMAC_SIZE      0x20000
+#define QCA953X_EHCI_BASE      0x1b000000
+#define QCA953X_EHCI_SIZE      0x200
+#define QCA953X_SRIF_BASE      (AR71XX_APB_BASE + 0x00116000)
+#define QCA953X_SRIF_SIZE      0x1000
+
+#define QCA953X_PCI_CFG_BASE0  0x14000000
+#define QCA953X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA953X_PCI_CRP_BASE0  (AR71XX_APB_BASE + 0x000c0000)
+#define QCA953X_PCI_MEM_BASE0  0x10000000
+#define QCA953X_PCI_MEM_SIZE   0x02000000
+
 #define QCA955X_PCI_MEM_BASE0  0x10000000
 #define QCA955X_PCI_MEM_BASE1  0x12000000
 #define QCA955X_PCI_MEM_SIZE   0x02000000
 #define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
 #define QCA955X_PCI_CTRL_SIZE  0x100
 
+#define QCA955X_GMAC_BASE      (AR71XX_APB_BASE + 0x00070000)
+#define QCA955X_GMAC_SIZE      0x40
 #define QCA955X_WMAC_BASE      (AR71XX_APB_BASE + 0x00100000)
 #define QCA955X_WMAC_SIZE      0x20000
 #define QCA955X_EHCI0_BASE     0x1b000000
 #define QCA955X_EHCI1_BASE     0x1b400000
 #define QCA955X_EHCI_SIZE      0x1000
+#define QCA955X_NFC_BASE       0x1b800200
+#define QCA955X_NFC_SIZE       0xb8
+
+#define QCA956X_PCI_MEM_BASE1  0x12000000
+#define QCA956X_PCI_MEM_SIZE   0x02000000
+#define QCA956X_PCI_CFG_BASE1  0x16000000
+#define QCA956X_PCI_CFG_SIZE   0x1000
+#define QCA956X_PCI_CRP_BASE1  (AR71XX_APB_BASE + 0x00250000)
+#define QCA956X_PCI_CRP_SIZE   0x1000
+#define QCA956X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA956X_PCI_CTRL_SIZE  0x100
+
+#define QCA956X_WMAC_BASE      (AR71XX_APB_BASE + 0x00100000)
+#define QCA956X_WMAC_SIZE      0x20000
+#define QCA956X_EHCI0_BASE     0x1b000000
+#define QCA956X_EHCI1_BASE     0x1b400000
+#define QCA956X_EHCI_SIZE      0x200
+#define QCA956X_GMAC_SGMII_BASE        (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SGMII_SIZE        0x64
+#define QCA956X_PLL_BASE       (AR71XX_APB_BASE + 0x00050000)
+#define QCA956X_PLL_SIZE       0x50
+#define QCA956X_GMAC_BASE      (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SIZE      0x64
+
+/*
+ * Hidden Registers
+ */
+#define QCA956X_MAC_CFG_BASE           0xb9000000
+#define QCA956X_MAC_CFG_SIZE           0x64
+
+#define QCA956X_MAC_CFG1_REG           0x00
+#define QCA956X_MAC_CFG1_SOFT_RST      BIT(31)
+#define QCA956X_MAC_CFG1_RX_RST                BIT(19)
+#define QCA956X_MAC_CFG1_TX_RST                BIT(18)
+#define QCA956X_MAC_CFG1_LOOPBACK      BIT(8)
+#define QCA956X_MAC_CFG1_RX_EN         BIT(2)
+#define QCA956X_MAC_CFG1_TX_EN         BIT(0)
+
+#define QCA956X_MAC_CFG2_REG           0x04
+#define QCA956X_MAC_CFG2_IF_1000       BIT(9)
+#define QCA956X_MAC_CFG2_IF_10_100     BIT(8)
+#define QCA956X_MAC_CFG2_HUGE_FRAME_EN BIT(5)
+#define QCA956X_MAC_CFG2_LEN_CHECK     BIT(4)
+#define QCA956X_MAC_CFG2_PAD_CRC_EN    BIT(2)
+#define QCA956X_MAC_CFG2_FDX           BIT(0)
+
+#define QCA956X_MAC_MII_MGMT_CFG_REG   0x20
+#define QCA956X_MGMT_CFG_CLK_DIV_20    0x07
+
+#define QCA956X_MAC_FIFO_CFG0_REG      0x48
+#define QCA956X_MAC_FIFO_CFG1_REG      0x4c
+#define QCA956X_MAC_FIFO_CFG2_REG      0x50
+#define QCA956X_MAC_FIFO_CFG3_REG      0x54
+#define QCA956X_MAC_FIFO_CFG4_REG      0x58
+#define QCA956X_MAC_FIFO_CFG5_REG      0x5c
+
+#define QCA956X_DAM_RESET_OFFSET       0xb90001bc
+#define QCA956X_DAM_RESET_SIZE         0x4
+#define QCA956X_INLINE_CHKSUM_ENG      BIT(27)
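
The "hidden registers" block above sits at a fixed KSEG1 address (0xb9000000), so it can be poked without an ioremap(). A hedged sketch of the soft-reset handshake the CFG1 bits suggest — the sequence and delay are assumptions for illustration (needs <linux/io.h> and <linux/delay.h>), not taken from this patch:

	void __iomem *cfg = (void __iomem *)QCA956X_MAC_CFG_BASE; /* KSEG1 */
	u32 t = __raw_readl(cfg + QCA956X_MAC_CFG1_REG);

	/* pulse the soft reset, then re-enable the RX/TX paths */
	__raw_writel(t | QCA956X_MAC_CFG1_SOFT_RST, cfg + QCA956X_MAC_CFG1_REG);
	udelay(10);				/* assumed settle time */
	__raw_writel(t | QCA956X_MAC_CFG1_RX_EN | QCA956X_MAC_CFG1_TX_EN,
		     cfg + QCA956X_MAC_CFG1_REG);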
 
 /*
  * DDR_CTRL block
 #define AR934X_DDR_REG_FLUSH_PCIE      0xa8
 #define AR934X_DDR_REG_FLUSH_WMAC      0xac
 
+#define QCA953X_DDR_REG_FLUSH_GE0      0x9c
+#define QCA953X_DDR_REG_FLUSH_GE1      0xa0
+#define QCA953X_DDR_REG_FLUSH_USB      0xa4
+#define QCA953X_DDR_REG_FLUSH_PCIE     0xa8
+#define QCA953X_DDR_REG_FLUSH_WMAC     0xac
+
 /*
  * PLL block
  */
 #define AR71XX_AHB_DIV_SHIFT           20
 #define AR71XX_AHB_DIV_MASK            0x7
 
+#define AR71XX_ETH0_PLL_SHIFT          17
+#define AR71XX_ETH1_PLL_SHIFT          19
+
 #define AR724X_PLL_REG_CPU_CONFIG      0x00
 #define AR724X_PLL_REG_PCIE_CONFIG     0x10
 
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS  BIT(16)
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET   BIT(25)
+
 #define AR724X_PLL_FB_SHIFT            0
 #define AR724X_PLL_FB_MASK             0x3ff
 #define AR724X_PLL_REF_DIV_SHIFT       10
 #define AR724X_DDR_DIV_SHIFT           22
 #define AR724X_DDR_DIV_MASK            0x3
 
+#define AR7242_PLL_REG_ETH0_INT_CLOCK  0x2c
+
 #define AR913X_PLL_REG_CPU_CONFIG      0x00
 #define AR913X_PLL_REG_ETH_CONFIG      0x04
 #define AR913X_PLL_REG_ETH0_INT_CLOCK  0x14
 #define AR913X_AHB_DIV_SHIFT           19
 #define AR913X_AHB_DIV_MASK            0x1
 
+#define AR913X_ETH0_PLL_SHIFT          20
+#define AR913X_ETH1_PLL_SHIFT          22
+
 #define AR933X_PLL_CPU_CONFIG_REG      0x00
 #define AR933X_PLL_CLOCK_CTRL_REG      0x08
 
 #define AR934X_PLL_CPU_CONFIG_REG              0x00
 #define AR934X_PLL_DDR_CONFIG_REG              0x04
 #define AR934X_PLL_CPU_DDR_CLK_CTRL_REG                0x08
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG    0x24
+#define AR934X_PLL_ETH_XMII_CONTROL_REG                0x2c
 
 #define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT      0
 #define AR934X_PLL_CPU_CONFIG_NFRAC_MASK       0x3f
 #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
 #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
 
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL   BIT(6)
+
+#define QCA953X_PLL_CPU_CONFIG_REG             0x00
+#define QCA953X_PLL_DDR_CONFIG_REG             0x04
+#define QCA953X_PLL_CLK_CTRL_REG               0x08
+#define QCA953X_PLL_SWITCH_CLOCK_CONTROL_REG   0x24
+#define QCA953X_PLL_ETH_XMII_CONTROL_REG       0x2c
+#define QCA953X_PLL_ETH_SGMII_CONTROL_REG      0x48
+
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT     0
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_MASK      0x3f
+#define QCA953X_PLL_CPU_CONFIG_NINT_SHIFT      6
+#define QCA953X_PLL_CPU_CONFIG_NINT_MASK       0x3f
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT    12
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_MASK     0x1f
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT    19
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK     0x7
+
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT     0
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_MASK      0x3ff
+#define QCA953X_PLL_DDR_CONFIG_NINT_SHIFT      10
+#define QCA953X_PLL_DDR_CONFIG_NINT_MASK       0x3f
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT    16
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_MASK     0x1f
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT    23
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK     0x7
+
+#define QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS            BIT(2)
+#define QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS            BIT(3)
+#define QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS            BIT(4)
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT                5
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK         0x1f
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT                10
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK         0x1f
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT                15
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK         0x1f
+#define QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL                BIT(20)
+#define QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL                BIT(21)
+#define QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL                BIT(24)
+
 #define QCA955X_PLL_CPU_CONFIG_REG             0x00
 #define QCA955X_PLL_DDR_CONFIG_REG             0x04
 #define QCA955X_PLL_CLK_CTRL_REG               0x08
+#define QCA955X_PLL_ETH_XMII_CONTROL_REG       0x28
+#define QCA955X_PLL_ETH_SGMII_CONTROL_REG      0x48
+#define QCA955X_PLL_ETH_SGMII_SERDES_REG       0x4c
 
 #define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT     0
 #define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK      0x3f
 #define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL                BIT(21)
 #define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL                BIT(24)
 
+#define QCA955X_PLL_ETH_SGMII_SERDES_LOCK_DETECT       BIT(2)
+#define QCA955X_PLL_ETH_SGMII_SERDES_PLL_REFCLK                BIT(1)
+#define QCA955X_PLL_ETH_SGMII_SERDES_EN_PLL            BIT(0)
+
+#define QCA956X_PLL_CPU_CONFIG_REG                     0x00
+#define QCA956X_PLL_CPU_CONFIG1_REG                    0x04
+#define QCA956X_PLL_DDR_CONFIG_REG                     0x08
+#define QCA956X_PLL_DDR_CONFIG1_REG                    0x0c
+#define QCA956X_PLL_CLK_CTRL_REG                       0x10
+#define QCA956X_PLL_SWITCH_CLOCK_CONTROL_REG           0x28
+#define QCA956X_PLL_ETH_XMII_CONTROL_REG               0x30
+#define QCA956X_PLL_ETH_SGMII_SERDES_REG               0x4c
+
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT            12
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_MASK             0x1f
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT            19
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK             0x7
+
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT          0
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK           0x1f
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT          5
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK           0x1fff
+#define QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT             18
+#define QCA956X_PLL_CPU_CONFIG1_NINT_MASK              0x1ff
+
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT            16
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_MASK             0x1f
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT            23
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK             0x7
+
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT          0
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK           0x1f
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT          5
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK           0x1fff
+#define QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT             18
+#define QCA956X_PLL_DDR_CONFIG1_NINT_MASK              0x1ff
+
+#define QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS            BIT(2)
+#define QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS            BIT(3)
+#define QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS            BIT(4)
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT                5
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK         0x1f
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT                10
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK         0x1f
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT                15
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK         0x1f
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL    BIT(20)
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL    BIT(21)
+#define QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL                BIT(24)
+
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_I2C_CLK_SELB            BIT(5)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_1         BIT(6)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_UART1_CLK_SEL           BIT(7)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_SHIFT 8
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_MASK         0xf
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EN_PLL_TOP              BIT(12)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_2         BIT(13)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_1         BIT(14)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_2         BIT(15)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCH_FUNC_TST_MODE    BIT(16)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EEE_ENABLE              BIT(17)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_OEN_CLK125M_PLL         BIT(18)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCHCLK_SEL           BIT(19)
+
+#define QCA956X_PLL_ETH_XMII_TX_INVERT                 BIT(1)
+#define QCA956X_PLL_ETH_XMII_GIGE                      BIT(25)
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT            28
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_MASK             0x3
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT            26
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_MASK             3
+
+#define QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT               BIT(2)
+#define QCA956X_PLL_ETH_SGMII_SERDES_PLL_REFCLK                        BIT(1)
+#define QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL                    BIT(0)
+
 /*
  * USB_CONFIG block
  */
 #define AR934X_RESET_REG_BOOTSTRAP             0xb0
 #define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS  0xac
 
+#define QCA953X_RESET_REG_RESET_MODULE         0x1c
+#define QCA953X_RESET_REG_BOOTSTRAP            0xb0
+#define QCA953X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
 #define QCA955X_RESET_REG_RESET_MODULE         0x1c
 #define QCA955X_RESET_REG_BOOTSTRAP            0xb0
 #define QCA955X_RESET_REG_EXT_INT_STATUS       0xac
 
+#define QCA956X_RESET_REG_RESET_MODULE         0x1c
+#define QCA956X_RESET_REG_BOOTSTRAP            0xb0
+#define QCA956X_RESET_REG_EXT_INT_STATUS       0xac
+
+#define MISC_INT_MIPS_SI_TIMERINT_MASK BIT(28)
 #define MISC_INT_ETHSW                 BIT(12)
 #define MISC_INT_TIMER4                        BIT(10)
 #define MISC_INT_TIMER3                        BIT(9)
 #define AR913X_RESET_USB_HOST          BIT(5)
 #define AR913X_RESET_USB_PHY           BIT(4)
 
+#define AR933X_RESET_GE1_MDIO          BIT(23)
+#define AR933X_RESET_GE0_MDIO          BIT(22)
+#define AR933X_RESET_GE1_MAC           BIT(13)
 #define AR933X_RESET_WMAC              BIT(11)
+#define AR933X_RESET_GE0_MAC           BIT(9)
 #define AR933X_RESET_USB_HOST          BIT(5)
 #define AR933X_RESET_USB_PHY           BIT(4)
 #define AR933X_RESET_USBSUS_OVERRIDE   BIT(3)
 
+#define AR934X_RESET_HOST              BIT(31)
+#define AR934X_RESET_SLIC              BIT(30)
+#define AR934X_RESET_HDMA              BIT(29)
+#define AR934X_RESET_EXTERNAL          BIT(28)
+#define AR934X_RESET_RTC               BIT(27)
+#define AR934X_RESET_PCIE_EP_INT       BIT(26)
+#define AR934X_RESET_CHKSUM_ACC                BIT(25)
+#define AR934X_RESET_FULL_CHIP         BIT(24)
+#define AR934X_RESET_GE1_MDIO          BIT(23)
+#define AR934X_RESET_GE0_MDIO          BIT(22)
+#define AR934X_RESET_CPU_NMI           BIT(21)
+#define AR934X_RESET_CPU_COLD          BIT(20)
+#define AR934X_RESET_HOST_RESET_INT    BIT(19)
+#define AR934X_RESET_PCIE_EP           BIT(18)
+#define AR934X_RESET_UART1             BIT(17)
+#define AR934X_RESET_DDR               BIT(16)
+#define AR934X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define AR934X_RESET_NANDF             BIT(14)
+#define AR934X_RESET_GE1_MAC           BIT(13)
+#define AR934X_RESET_ETH_SWITCH_ANALOG BIT(12)
 #define AR934X_RESET_USB_PHY_ANALOG    BIT(11)
+#define AR934X_RESET_HOST_DMA_INT      BIT(10)
+#define AR934X_RESET_GE0_MAC           BIT(9)
+#define AR934X_RESET_ETH_SWITCH                BIT(8)
+#define AR934X_RESET_PCIE_PHY          BIT(7)
+#define AR934X_RESET_PCIE              BIT(6)
 #define AR934X_RESET_USB_HOST          BIT(5)
 #define AR934X_RESET_USB_PHY           BIT(4)
 #define AR934X_RESET_USBSUS_OVERRIDE   BIT(3)
-
+#define AR934X_RESET_LUT               BIT(2)
+#define AR934X_RESET_MBOX              BIT(1)
+#define AR934X_RESET_I2S               BIT(0)
+
+#define QCA953X_RESET_USB_EXT_PWR      BIT(29)
+#define QCA953X_RESET_EXTERNAL         BIT(28)
+#define QCA953X_RESET_RTC              BIT(27)
+#define QCA953X_RESET_FULL_CHIP                BIT(24)
+#define QCA953X_RESET_GE1_MDIO         BIT(23)
+#define QCA953X_RESET_GE0_MDIO         BIT(22)
+#define QCA953X_RESET_CPU_NMI          BIT(21)
+#define QCA953X_RESET_CPU_COLD         BIT(20)
+#define QCA953X_RESET_DDR              BIT(16)
+#define QCA953X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA953X_RESET_GE1_MAC          BIT(13)
+#define QCA953X_RESET_ETH_SWITCH_ANALOG        BIT(12)
+#define QCA953X_RESET_USB_PHY_ANALOG   BIT(11)
+#define QCA953X_RESET_GE0_MAC          BIT(9)
+#define QCA953X_RESET_ETH_SWITCH       BIT(8)
+#define QCA953X_RESET_PCIE_PHY         BIT(7)
+#define QCA953X_RESET_PCIE             BIT(6)
+#define QCA953X_RESET_USB_HOST         BIT(5)
+#define QCA953X_RESET_USB_PHY          BIT(4)
+#define QCA953X_RESET_USBSUS_OVERRIDE  BIT(3)
+
+#define QCA955X_RESET_HOST             BIT(31)
+#define QCA955X_RESET_SLIC             BIT(30)
+#define QCA955X_RESET_HDMA             BIT(29)
+#define QCA955X_RESET_EXTERNAL         BIT(28)
+#define QCA955X_RESET_RTC              BIT(27)
+#define QCA955X_RESET_PCIE_EP_INT      BIT(26)
+#define QCA955X_RESET_CHKSUM_ACC       BIT(25)
+#define QCA955X_RESET_FULL_CHIP                BIT(24)
+#define QCA955X_RESET_GE1_MDIO         BIT(23)
+#define QCA955X_RESET_GE0_MDIO         BIT(22)
+#define QCA955X_RESET_CPU_NMI          BIT(21)
+#define QCA955X_RESET_CPU_COLD         BIT(20)
+#define QCA955X_RESET_HOST_RESET_INT   BIT(19)
+#define QCA955X_RESET_PCIE_EP          BIT(18)
+#define QCA955X_RESET_UART1            BIT(17)
+#define QCA955X_RESET_DDR              BIT(16)
+#define QCA955X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA955X_RESET_NANDF            BIT(14)
+#define QCA955X_RESET_GE1_MAC          BIT(13)
+#define QCA955X_RESET_SGMII_ANALOG     BIT(12)
+#define QCA955X_RESET_USB_PHY_ANALOG   BIT(11)
+#define QCA955X_RESET_HOST_DMA_INT     BIT(10)
+#define QCA955X_RESET_GE0_MAC          BIT(9)
+#define QCA955X_RESET_SGMII            BIT(8)
+#define QCA955X_RESET_PCIE_PHY         BIT(7)
+#define QCA955X_RESET_PCIE             BIT(6)
+#define QCA955X_RESET_USB_HOST         BIT(5)
+#define QCA955X_RESET_USB_PHY          BIT(4)
+#define QCA955X_RESET_USBSUS_OVERRIDE  BIT(3)
+#define QCA955X_RESET_LUT              BIT(2)
+#define QCA955X_RESET_MBOX             BIT(1)
+#define QCA955X_RESET_I2S              BIT(0)
+
+#define QCA956X_RESET_EXTERNAL         BIT(28)
+#define QCA956X_RESET_FULL_CHIP                BIT(24)
+#define QCA956X_RESET_GE1_MDIO         BIT(23)
+#define QCA956X_RESET_GE0_MDIO         BIT(22)
+#define QCA956X_RESET_CPU_NMI          BIT(21)
+#define QCA956X_RESET_CPU_COLD         BIT(20)
+#define QCA956X_RESET_DMA              BIT(19)
+#define QCA956X_RESET_DDR              BIT(16)
+#define QCA956X_RESET_GE1_MAC          BIT(13)
+#define QCA956X_RESET_SGMII_ANALOG     BIT(12)
+#define QCA956X_RESET_USB_PHY_ANALOG   BIT(11)
+#define QCA956X_RESET_GE0_MAC          BIT(9)
+#define QCA956X_RESET_SGMII            BIT(8)
+#define QCA956X_RESET_USB_HOST         BIT(5)
+#define QCA956X_RESET_USB_PHY          BIT(4)
+#define QCA956X_RESET_USBSUS_OVERRIDE  BIT(3)
+#define QCA956X_RESET_SWITCH_ANALOG    BIT(2)
+#define QCA956X_RESET_SWITCH           BIT(0)
+
+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN  BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY       BIT(4)
 #define AR933X_BOOTSTRAP_REF_CLK_40    BIT(0)
 
 #define AR934X_BOOTSTRAP_SW_OPTION8    BIT(23)
 #define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
 #define AR934X_BOOTSTRAP_DDR1          BIT(0)
 
+#define QCA953X_BOOTSTRAP_SW_OPTION2   BIT(12)
+#define QCA953X_BOOTSTRAP_SW_OPTION1   BIT(11)
+#define QCA953X_BOOTSTRAP_EJTAG_MODE   BIT(5)
+#define QCA953X_BOOTSTRAP_REF_CLK_40   BIT(4)
+#define QCA953X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define QCA953X_BOOTSTRAP_DDR1         BIT(0)
+
 #define QCA955X_BOOTSTRAP_REF_CLK_40   BIT(4)
 
+#define QCA956X_BOOTSTRAP_REF_CLK_40   BIT(2)
+
 #define AR934X_PCIE_WMAC_INT_WMAC_MISC         BIT(0)
 #define AR934X_PCIE_WMAC_INT_WMAC_TX           BIT(1)
 #define AR934X_PCIE_WMAC_INT_WMAC_RXLP         BIT(2)
         AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
         AR934X_PCIE_WMAC_INT_PCIE_RC3)
 
+#define QCA953X_PCIE_WMAC_INT_WMAC_MISC                BIT(0)
+#define QCA953X_PCIE_WMAC_INT_WMAC_TX          BIT(1)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXLP                BIT(2)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXHP                BIT(3)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC          BIT(4)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC0         BIT(5)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC1         BIT(6)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC2         BIT(7)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC3         BIT(8)
+#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
+       (QCA953X_PCIE_WMAC_INT_WMAC_MISC | QCA953X_PCIE_WMAC_INT_WMAC_TX | \
+        QCA953X_PCIE_WMAC_INT_WMAC_RXLP | QCA953X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
+       (QCA953X_PCIE_WMAC_INT_PCIE_RC | QCA953X_PCIE_WMAC_INT_PCIE_RC0 | \
+        QCA953X_PCIE_WMAC_INT_PCIE_RC1 | QCA953X_PCIE_WMAC_INT_PCIE_RC2 | \
+        QCA953X_PCIE_WMAC_INT_PCIE_RC3)
+
 #define QCA955X_EXT_INT_WMAC_MISC              BIT(0)
 #define QCA955X_EXT_INT_WMAC_TX                        BIT(1)
 #define QCA955X_EXT_INT_WMAC_RXLP              BIT(2)
         QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
         QCA955X_EXT_INT_PCIE_RC2_INT3)
 
+#define QCA956X_EXT_INT_WMAC_MISC              BIT(0)
+#define QCA956X_EXT_INT_WMAC_TX                        BIT(1)
+#define QCA956X_EXT_INT_WMAC_RXLP              BIT(2)
+#define QCA956X_EXT_INT_WMAC_RXHP              BIT(3)
+#define QCA956X_EXT_INT_PCIE_RC1               BIT(4)
+#define QCA956X_EXT_INT_PCIE_RC1_INT0          BIT(5)
+#define QCA956X_EXT_INT_PCIE_RC1_INT1          BIT(6)
+#define QCA956X_EXT_INT_PCIE_RC1_INT2          BIT(7)
+#define QCA956X_EXT_INT_PCIE_RC1_INT3          BIT(8)
+#define QCA956X_EXT_INT_PCIE_RC2               BIT(12)
+#define QCA956X_EXT_INT_PCIE_RC2_INT0          BIT(13)
+#define QCA956X_EXT_INT_PCIE_RC2_INT1          BIT(14)
+#define QCA956X_EXT_INT_PCIE_RC2_INT2          BIT(15)
+#define QCA956X_EXT_INT_PCIE_RC2_INT3          BIT(16)
+#define QCA956X_EXT_INT_USB1                   BIT(24)
+#define QCA956X_EXT_INT_USB2                   BIT(28)
+
+#define QCA956X_EXT_INT_WMAC_ALL \
+       (QCA956X_EXT_INT_WMAC_MISC | QCA956X_EXT_INT_WMAC_TX | \
+        QCA956X_EXT_INT_WMAC_RXLP | QCA956X_EXT_INT_WMAC_RXHP)
+
+#define QCA956X_EXT_INT_PCIE_RC1_ALL \
+       (QCA956X_EXT_INT_PCIE_RC1 | QCA956X_EXT_INT_PCIE_RC1_INT0 | \
+        QCA956X_EXT_INT_PCIE_RC1_INT1 | QCA956X_EXT_INT_PCIE_RC1_INT2 | \
+        QCA956X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA956X_EXT_INT_PCIE_RC2_ALL \
+       (QCA956X_EXT_INT_PCIE_RC2 | QCA956X_EXT_INT_PCIE_RC2_INT0 | \
+        QCA956X_EXT_INT_PCIE_RC2_INT1 | QCA956X_EXT_INT_PCIE_RC2_INT2 | \
+        QCA956X_EXT_INT_PCIE_RC2_INT3)
+
 #define REV_ID_MAJOR_MASK              0xfff0
 #define REV_ID_MAJOR_AR71XX            0x00a0
 #define REV_ID_MAJOR_AR913X            0x00b0
 #define REV_ID_MAJOR_AR9341            0x0120
 #define REV_ID_MAJOR_AR9342            0x1120
 #define REV_ID_MAJOR_AR9344            0x2120
+#define REV_ID_MAJOR_QCA9533           0x0140
+#define REV_ID_MAJOR_QCA9533_V2                0x0160
 #define REV_ID_MAJOR_QCA9556           0x0130
 #define REV_ID_MAJOR_QCA9558           0x1130
+#define REV_ID_MAJOR_TP9343            0x0150
+#define REV_ID_MAJOR_QCA956X           0x1150
 
 #define AR71XX_REV_ID_MINOR_MASK       0x3
 #define AR71XX_REV_ID_MINOR_AR7130     0x0
 
 #define AR934X_REV_ID_REVISION_MASK    0xf
 
+#define QCA953X_REV_ID_REVISION_MASK   0xf
+
 #define QCA955X_REV_ID_REVISION_MASK   0xf
 
+#define QCA956X_REV_ID_REVISION_MASK   0xf
+
 /*
  * SPI block
  */
 #define AR71XX_GPIO_REG_INT_ENABLE     0x24
 #define AR71XX_GPIO_REG_FUNC           0x28
 
+#define AR934X_GPIO_REG_OUT_FUNC0      0x2c
+#define AR934X_GPIO_REG_OUT_FUNC1      0x30
+#define AR934X_GPIO_REG_OUT_FUNC2      0x34
+#define AR934X_GPIO_REG_OUT_FUNC3      0x38
+#define AR934X_GPIO_REG_OUT_FUNC4      0x3c
+#define AR934X_GPIO_REG_OUT_FUNC5      0x40
 #define AR934X_GPIO_REG_FUNC           0x6c
 
+#define QCA953X_GPIO_REG_OUT_FUNC0     0x2c
+#define QCA953X_GPIO_REG_OUT_FUNC1     0x30
+#define QCA953X_GPIO_REG_OUT_FUNC2     0x34
+#define QCA953X_GPIO_REG_OUT_FUNC3     0x38
+#define QCA953X_GPIO_REG_OUT_FUNC4     0x3c
+#define QCA953X_GPIO_REG_IN_ENABLE0    0x44
+#define QCA953X_GPIO_REG_FUNC          0x6c
+
+#define QCA953X_GPIO_OUT_MUX_SPI_CS1           10
+#define QCA953X_GPIO_OUT_MUX_SPI_CS2           11
+#define QCA953X_GPIO_OUT_MUX_SPI_CS0           9
+#define QCA953X_GPIO_OUT_MUX_SPI_CLK           8
+#define QCA953X_GPIO_OUT_MUX_SPI_MOSI          12
+#define QCA953X_GPIO_OUT_MUX_LED_LINK1         41
+#define QCA953X_GPIO_OUT_MUX_LED_LINK2         42
+#define QCA953X_GPIO_OUT_MUX_LED_LINK3         43
+#define QCA953X_GPIO_OUT_MUX_LED_LINK4         44
+#define QCA953X_GPIO_OUT_MUX_LED_LINK5         45
+
+#define QCA955X_GPIO_REG_OUT_FUNC0     0x2c
+#define QCA955X_GPIO_REG_OUT_FUNC1     0x30
+#define QCA955X_GPIO_REG_OUT_FUNC2     0x34
+#define QCA955X_GPIO_REG_OUT_FUNC3     0x38
+#define QCA955X_GPIO_REG_OUT_FUNC4     0x3c
+#define QCA955X_GPIO_REG_OUT_FUNC5     0x40
+#define QCA955X_GPIO_REG_FUNC          0x6c
+
+#define QCA956X_GPIO_REG_OUT_FUNC0     0x2c
+#define QCA956X_GPIO_REG_OUT_FUNC1     0x30
+#define QCA956X_GPIO_REG_OUT_FUNC2     0x34
+#define QCA956X_GPIO_REG_OUT_FUNC3     0x38
+#define QCA956X_GPIO_REG_OUT_FUNC4     0x3c
+#define QCA956X_GPIO_REG_OUT_FUNC5     0x40
+#define QCA956X_GPIO_REG_IN_ENABLE0    0x44
+#define QCA956X_GPIO_REG_IN_ENABLE3    0x50
+#define QCA956X_GPIO_REG_FUNC          0x6c
+
+#define QCA956X_GPIO_OUT_MUX_GE0_MDO   32
+#define QCA956X_GPIO_OUT_MUX_GE0_MDC   33
+
 #define AR71XX_GPIO_COUNT              16
 #define AR7240_GPIO_COUNT              18
 #define AR7241_GPIO_COUNT              20
 #define AR913X_GPIO_COUNT              22
 #define AR933X_GPIO_COUNT              30
 #define AR934X_GPIO_COUNT              23
+#define QCA953X_GPIO_COUNT             18
 #define QCA955X_GPIO_COUNT             24
+#define QCA956X_GPIO_COUNT             23
 
 /*
  * SRIF block
 #define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
 #define AR934X_SRIF_DPLL2_OUTDIV_MASK  0x7
 
+#define QCA953X_SRIF_CPU_DPLL1_REG     0x1c0
+#define QCA953X_SRIF_CPU_DPLL2_REG     0x1c4
+#define QCA953X_SRIF_CPU_DPLL3_REG     0x1c8
+
+#define QCA953X_SRIF_DDR_DPLL1_REG     0x240
+#define QCA953X_SRIF_DDR_DPLL2_REG     0x244
+#define QCA953X_SRIF_DDR_DPLL3_REG     0x248
+
+#define QCA953X_SRIF_DPLL1_REFDIV_SHIFT        27
+#define QCA953X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define QCA953X_SRIF_DPLL1_NINT_SHIFT  18
+#define QCA953X_SRIF_DPLL1_NINT_MASK   0x1ff
+#define QCA953X_SRIF_DPLL1_NFRAC_MASK  0x0003ffff
+
+#define QCA953X_SRIF_DPLL2_LOCAL_PLL   BIT(30)
+#define QCA953X_SRIF_DPLL2_OUTDIV_SHIFT        13
+#define QCA953X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define AR71XX_GPIO_FUNC_STEREO_EN             BIT(17)
+#define AR71XX_GPIO_FUNC_SLIC_EN               BIT(16)
+#define AR71XX_GPIO_FUNC_SPI_CS2_EN            BIT(13)
+#define AR71XX_GPIO_FUNC_SPI_CS1_EN            BIT(12)
+#define AR71XX_GPIO_FUNC_UART_EN               BIT(8)
+#define AR71XX_GPIO_FUNC_USB_OC_EN             BIT(4)
+#define AR71XX_GPIO_FUNC_USB_CLK_EN            BIT(0)
+
+#define AR724X_GPIO_FUNC_GE0_MII_CLK_EN                BIT(19)
+#define AR724X_GPIO_FUNC_SPI_EN                        BIT(18)
+#define AR724X_GPIO_FUNC_SPI_CS_EN2            BIT(14)
+#define AR724X_GPIO_FUNC_SPI_CS_EN1            BIT(13)
+#define AR724X_GPIO_FUNC_CLK_OBS5_EN           BIT(12)
+#define AR724X_GPIO_FUNC_CLK_OBS4_EN           BIT(11)
+#define AR724X_GPIO_FUNC_CLK_OBS3_EN           BIT(10)
+#define AR724X_GPIO_FUNC_CLK_OBS2_EN           BIT(9)
+#define AR724X_GPIO_FUNC_CLK_OBS1_EN           BIT(8)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN    BIT(7)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN    BIT(6)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN    BIT(5)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN    BIT(4)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN    BIT(3)
+#define AR724X_GPIO_FUNC_UART_RTS_CTS_EN       BIT(2)
+#define AR724X_GPIO_FUNC_UART_EN               BIT(1)
+#define AR724X_GPIO_FUNC_JTAG_DISABLE          BIT(0)
+
+#define AR913X_GPIO_FUNC_WMAC_LED_EN           BIT(22)
+#define AR913X_GPIO_FUNC_EXP_PORT_CS_EN                BIT(21)
+#define AR913X_GPIO_FUNC_I2S_REFCLKEN          BIT(20)
+#define AR913X_GPIO_FUNC_I2S_MCKEN             BIT(19)
+#define AR913X_GPIO_FUNC_I2S1_EN               BIT(18)
+#define AR913X_GPIO_FUNC_I2S0_EN               BIT(17)
+#define AR913X_GPIO_FUNC_SLIC_EN               BIT(16)
+#define AR913X_GPIO_FUNC_UART_RTSCTS_EN                BIT(9)
+#define AR913X_GPIO_FUNC_UART_EN               BIT(8)
+#define AR913X_GPIO_FUNC_USB_CLK_EN            BIT(4)
+
+#define AR933X_GPIO_FUNC_SPDIF2TCK             BIT(31)
+#define AR933X_GPIO_FUNC_SPDIF_EN              BIT(30)
+#define AR933X_GPIO_FUNC_I2SO_22_18_EN         BIT(29)
+#define AR933X_GPIO_FUNC_I2S_MCK_EN            BIT(27)
+#define AR933X_GPIO_FUNC_I2SO_EN               BIT(26)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_DUPL   BIT(25)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_COLL   BIT(24)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_ACT    BIT(23)
+#define AR933X_GPIO_FUNC_SPI_EN                        BIT(18)
+#define AR933X_GPIO_FUNC_SPI_CS_EN2            BIT(14)
+#define AR933X_GPIO_FUNC_SPI_CS_EN1            BIT(13)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN    BIT(7)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN    BIT(6)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN    BIT(5)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN    BIT(4)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN    BIT(3)
+#define AR933X_GPIO_FUNC_UART_RTS_CTS_EN       BIT(2)
+#define AR933X_GPIO_FUNC_UART_EN               BIT(1)
+#define AR933X_GPIO_FUNC_JTAG_DISABLE          BIT(0)
+
+#define AR934X_GPIO_FUNC_CLK_OBS7_EN           BIT(9)
+#define AR934X_GPIO_FUNC_CLK_OBS6_EN           BIT(8)
+#define AR934X_GPIO_FUNC_CLK_OBS5_EN           BIT(7)
+#define AR934X_GPIO_FUNC_CLK_OBS4_EN           BIT(6)
+#define AR934X_GPIO_FUNC_CLK_OBS3_EN           BIT(5)
+#define AR934X_GPIO_FUNC_CLK_OBS2_EN           BIT(4)
+#define AR934X_GPIO_FUNC_CLK_OBS1_EN           BIT(3)
+#define AR934X_GPIO_FUNC_CLK_OBS0_EN           BIT(2)
+#define AR934X_GPIO_FUNC_JTAG_DISABLE          BIT(1)
+
+#define AR934X_GPIO_OUT_GPIO           0
+#define AR934X_GPIO_OUT_SPI_CS1        7
+#define AR934X_GPIO_OUT_LED_LINK0      41
+#define AR934X_GPIO_OUT_LED_LINK1      42
+#define AR934X_GPIO_OUT_LED_LINK2      43
+#define AR934X_GPIO_OUT_LED_LINK3      44
+#define AR934X_GPIO_OUT_LED_LINK4      45
+#define AR934X_GPIO_OUT_EXT_LNA0       46
+#define AR934X_GPIO_OUT_EXT_LNA1       47
+
+#define QCA955X_GPIO_FUNC_CLK_OBS7_EN          BIT(9)
+#define QCA955X_GPIO_FUNC_CLK_OBS6_EN          BIT(8)
+#define QCA955X_GPIO_FUNC_CLK_OBS5_EN          BIT(7)
+#define QCA955X_GPIO_FUNC_CLK_OBS4_EN          BIT(6)
+#define QCA955X_GPIO_FUNC_CLK_OBS3_EN          BIT(5)
+#define QCA955X_GPIO_FUNC_CLK_OBS2_EN          BIT(4)
+#define QCA955X_GPIO_FUNC_CLK_OBS1_EN          BIT(3)
+#define QCA955X_GPIO_FUNC_JTAG_DISABLE         BIT(1)
+
+#define QCA955X_GPIO_OUT_GPIO          0
+#define QCA955X_MII_EXT_MDI            1
+#define QCA955X_SLIC_DATA_OUT          3
+#define QCA955X_SLIC_PCM_FS            4
+#define QCA955X_SLIC_PCM_CLK           5
+#define QCA955X_SPI_CLK                        8
+#define QCA955X_SPI_CS_0               9
+#define QCA955X_SPI_CS_1               10
+#define QCA955X_SPI_CS_2               11
+#define QCA955X_SPI_MISO               12
+#define QCA955X_I2S_CLK                        13
+#define QCA955X_I2S_WS                 14
+#define QCA955X_I2S_SD                 15
+#define QCA955X_I2S_MCK                        16
+#define QCA955X_SPDIF_OUT              17
+#define QCA955X_UART1_TD               18
+#define QCA955X_UART1_RTS              19
+#define QCA955X_UART1_RD               20
+#define QCA955X_UART1_CTS              21
+#define QCA955X_UART0_SOUT             22
+#define QCA955X_SPDIF2_OUT             23
+#define QCA955X_LED_SGMII_SPEED0       24
+#define QCA955X_LED_SGMII_SPEED1       25
+#define QCA955X_LED_SGMII_DUPLEX       26
+#define QCA955X_LED_SGMII_LINK_UP      27
+#define QCA955X_SGMII_SPEED0_INVERT    28
+#define QCA955X_SGMII_SPEED1_INVERT    29
+#define QCA955X_SGMII_DUPLEX_INVERT    30
+#define QCA955X_SGMII_LINK_UP_INVERT   31
+#define QCA955X_GE1_MII_MDO            32
+#define QCA955X_GE1_MII_MDC            33
+#define QCA955X_SWCOM2                 38
+#define QCA955X_SWCOM3                 39
+#define QCA955X_MAC2_GPIO              40
+#define QCA955X_MAC3_GPIO              41
+#define QCA955X_ATT_LED                        42
+#define QCA955X_PWR_LED                        43
+#define QCA955X_TX_FRAME               44
+#define QCA955X_RX_CLEAR_EXTERNAL      45
+#define QCA955X_LED_NETWORK_EN         46
+#define QCA955X_LED_POWER_EN           47
+#define QCA955X_WMAC_GLUE_WOW          68
+#define QCA955X_RX_CLEAR_EXTENSION     70
+#define QCA955X_CP_NAND_CS1            73
+#define QCA955X_USB_SUSPEND            74
+#define QCA955X_ETH_TX_ERR             75
+#define QCA955X_DDR_DQ_OE              76
+#define QCA955X_CLKREQ_N_EP            77
+#define QCA955X_CLKREQ_N_RC            78
+#define QCA955X_CLK_OBS0               79
+#define QCA955X_CLK_OBS1               80
+#define QCA955X_CLK_OBS2               81
+#define QCA955X_CLK_OBS3               82
+#define QCA955X_CLK_OBS4               83
+#define QCA955X_CLK_OBS5               84
+
+/*
+ * MII_CTRL block
+ */
+#define AR71XX_MII_REG_MII0_CTRL       0x00
+#define AR71XX_MII_REG_MII1_CTRL       0x04
+
+#define AR71XX_MII_CTRL_IF_MASK                3
+#define AR71XX_MII_CTRL_SPEED_SHIFT    4
+#define AR71XX_MII_CTRL_SPEED_MASK     3
+#define AR71XX_MII_CTRL_SPEED_10       0
+#define AR71XX_MII_CTRL_SPEED_100      1
+#define AR71XX_MII_CTRL_SPEED_1000     2
+
+#define AR71XX_MII0_CTRL_IF_GMII       0
+#define AR71XX_MII0_CTRL_IF_MII                1
+#define AR71XX_MII0_CTRL_IF_RGMII      2
+#define AR71XX_MII0_CTRL_IF_RMII       3
+
+#define AR71XX_MII1_CTRL_IF_RGMII      0
+#define AR71XX_MII1_CTRL_IF_RMII       1
+
+/*
+ * AR933X GMAC interface
+ */
+#define AR933X_GMAC_REG_ETH_CFG                0x00
+
+#define AR933X_ETH_CFG_RGMII_GE0       BIT(0)
+#define AR933X_ETH_CFG_MII_GE0         BIT(1)
+#define AR933X_ETH_CFG_GMII_GE0                BIT(2)
+#define AR933X_ETH_CFG_MII_GE0_MASTER  BIT(3)
+#define AR933X_ETH_CFG_MII_GE0_SLAVE   BIT(4)
+#define AR933X_ETH_CFG_MII_GE0_ERR_EN  BIT(5)
+#define AR933X_ETH_CFG_SW_PHY_SWAP     BIT(7)
+#define AR933X_ETH_CFG_SW_PHY_ADDR_SWAP        BIT(8)
+#define AR933X_ETH_CFG_RMII_GE0                BIT(9)
+#define AR933X_ETH_CFG_RMII_GE0_SPD_10 0
+#define AR933X_ETH_CFG_RMII_GE0_SPD_100        BIT(10)
+
+/*
+ * AR934X GMAC Interface
+ */
+#define AR934X_GMAC_REG_ETH_CFG                0x00
+
+#define AR934X_ETH_CFG_RGMII_GMAC0     BIT(0)
+#define AR934X_ETH_CFG_MII_GMAC0       BIT(1)
+#define AR934X_ETH_CFG_GMII_GMAC0      BIT(2)
+#define AR934X_ETH_CFG_MII_GMAC0_MASTER        BIT(3)
+#define AR934X_ETH_CFG_MII_GMAC0_SLAVE BIT(4)
+#define AR934X_ETH_CFG_MII_GMAC0_ERR_EN        BIT(5)
+#define AR934X_ETH_CFG_SW_ONLY_MODE    BIT(6)
+#define AR934X_ETH_CFG_SW_PHY_SWAP     BIT(7)
+#define AR934X_ETH_CFG_SW_APB_ACCESS   BIT(9)
+#define AR934X_ETH_CFG_RMII_GMAC0      BIT(10)
+#define AR933X_ETH_CFG_MII_CNTL_SPEED  BIT(11)
+#define AR934X_ETH_CFG_RMII_GMAC0_MASTER BIT(12)
+#define AR933X_ETH_CFG_SW_ACC_MSB_FIRST        BIT(13)
+#define AR934X_ETH_CFG_RXD_DELAY        BIT(14)
+#define AR934X_ETH_CFG_RXD_DELAY_MASK   0x3
+#define AR934X_ETH_CFG_RXD_DELAY_SHIFT  14
+#define AR934X_ETH_CFG_RDV_DELAY        BIT(16)
+#define AR934X_ETH_CFG_RDV_DELAY_MASK   0x3
+#define AR934X_ETH_CFG_RDV_DELAY_SHIFT  16
+
+/*
+ * QCA953X GMAC Interface
+ */
+#define QCA953X_GMAC_REG_ETH_CFG               0x00
+
+#define QCA953X_ETH_CFG_SW_ONLY_MODE           BIT(6)
+#define QCA953X_ETH_CFG_SW_PHY_SWAP            BIT(7)
+#define QCA953X_ETH_CFG_SW_APB_ACCESS          BIT(9)
+#define QCA953X_ETH_CFG_SW_ACC_MSB_FIRST       BIT(13)
+
+/*
+ * QCA955X GMAC Interface
+ */
+
+#define QCA955X_GMAC_REG_ETH_CFG       0x00
+#define QCA955X_GMAC_REG_SGMII_SERDES  0x18
+
+#define QCA955X_ETH_CFG_RGMII_EN       BIT(0)
+#define QCA955X_ETH_CFG_MII_GE0                BIT(1)
+#define QCA955X_ETH_CFG_GMII_GE0       BIT(2)
+#define QCA955X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define QCA955X_ETH_CFG_MII_GE0_SLAVE  BIT(4)
+#define QCA955X_ETH_CFG_GE0_ERR_EN     BIT(5)
+#define QCA955X_ETH_CFG_GE0_SGMII      BIT(6)
+#define QCA955X_ETH_CFG_RMII_GE0       BIT(10)
+#define QCA955X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define QCA955X_ETH_CFG_RMII_GE0_MASTER        BIT(12)
+#define QCA955X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RXD_DELAY_SHIFT        14
+#define QCA955X_ETH_CFG_RDV_DELAY      BIT(16)
+#define QCA955X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RDV_DELAY_SHIFT        16
+#define QCA955X_ETH_CFG_TXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXD_DELAY_SHIFT        18
+#define QCA955X_ETH_CFG_TXE_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXE_DELAY_SHIFT        20
+
+#define QCA955X_SGMII_SERDES_LOCK_DETECT_STATUS        BIT(15)
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+/*
+ * QCA956X GMAC Interface
+ */
+
+#define QCA956X_GMAC_REG_ETH_CFG       0x00
+#define QCA956X_GMAC_REG_SGMII_RESET   0x14
+#define QCA956X_GMAC_REG_SGMII_SERDES  0x18
+#define QCA956X_GMAC_REG_MR_AN_CONTROL 0x1c
+#define QCA956X_GMAC_REG_SGMII_CONFIG  0x34
+#define QCA956X_GMAC_REG_SGMII_DEBUG   0x58
+
+#define QCA956X_ETH_CFG_RGMII_EN               BIT(0)
+#define QCA956X_ETH_CFG_GE0_SGMII              BIT(6)
+#define QCA956X_ETH_CFG_SW_ONLY_MODE           BIT(7)
+#define QCA956X_ETH_CFG_SW_PHY_SWAP            BIT(8)
+#define QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP       BIT(9)
+#define QCA956X_ETH_CFG_SW_APB_ACCESS          BIT(10)
+#define QCA956X_ETH_CFG_SW_ACC_MSB_FIRST       BIT(13)
+#define QCA956X_ETH_CFG_RXD_DELAY_MASK         0x3
+#define QCA956X_ETH_CFG_RXD_DELAY_SHIFT                14
+#define QCA956X_ETH_CFG_RDV_DELAY_MASK         0x3
+#define QCA956X_ETH_CFG_RDV_DELAY_SHIFT                16
+
+#define QCA956X_SGMII_RESET_RX_CLK_N_RESET     0x0
+#define QCA956X_SGMII_RESET_RX_CLK_N           BIT(0)
+#define QCA956X_SGMII_RESET_TX_CLK_N           BIT(1)
+#define QCA956X_SGMII_RESET_RX_125M_N          BIT(2)
+#define QCA956X_SGMII_RESET_TX_125M_N          BIT(3)
+#define QCA956X_SGMII_RESET_HW_RX_125M_N       BIT(4)
+
+#define QCA956X_SGMII_SERDES_CDR_BW_MASK       0x3
+#define QCA956X_SGMII_SERDES_CDR_BW_SHIFT      1
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK   0x7
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT  4
+#define QCA956X_SGMII_SERDES_PLL_BW            BIT(8)
+#define QCA956X_SGMII_SERDES_VCO_FAST          BIT(9)
+#define QCA956X_SGMII_SERDES_VCO_SLOW          BIT(10)
+#define QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS        BIT(15)
+#define QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT  BIT(16)
+#define QCA956X_SGMII_SERDES_FIBER_SDO         BIT(17)
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+#define QCA956X_SGMII_SERDES_VCO_REG_SHIFT     27
+#define QCA956X_SGMII_SERDES_VCO_REG_MASK      0xf
+
+#define QCA956X_MR_AN_CONTROL_AN_ENABLE                BIT(12)
+#define QCA956X_MR_AN_CONTROL_PHY_RESET                BIT(15)
+
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT   0
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_MASK    0x7
+
 #endif /* __ASM_MACH_AR71XX_REGS_H */
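
A minimal sketch of how the new base/size pairs and ETH_CFG bits above combine in practice: ioremap() the APB window, then read-modify-write the config register. The function is hypothetical (requires <linux/io.h>); only the macros come from this header.

	static void __iomem *gmac_base;

	static void qca953x_gmac_enable_sw_only(void)
	{
		u32 t;

		/* map the APB-relative GMAC window defined above */
		gmac_base = ioremap(QCA953X_GMAC_BASE, QCA953X_GMAC_SIZE);
		if (!gmac_base)
			return;

		/* put the MAC into switch-only mode */
		t = __raw_readl(gmac_base + QCA953X_GMAC_REG_ETH_CFG);
		t |= QCA953X_ETH_CFG_SW_ONLY_MODE;
		__raw_writel(t, gmac_base + QCA953X_GMAC_REG_ETH_CFG);
	}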
index 441faa92c3cd488ac97685a1930597f7449a88f4..73dcd63b8243b66a7134eb93fb5084a525cbf8c3 100644 (file)
@@ -32,8 +32,11 @@ enum ath79_soc_type {
        ATH79_SOC_AR9341,
        ATH79_SOC_AR9342,
        ATH79_SOC_AR9344,
+       ATH79_SOC_QCA9533,
        ATH79_SOC_QCA9556,
        ATH79_SOC_QCA9558,
+       ATH79_SOC_TP9343,
+       ATH79_SOC_QCA956X,
 };
 
 extern enum ath79_soc_type ath79_soc;
@@ -100,6 +103,16 @@ static inline int soc_is_ar934x(void)
        return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
 }
 
+static inline int soc_is_qca9533(void)
+{
+       return ath79_soc == ATH79_SOC_QCA9533;
+}
+
+static inline int soc_is_qca953x(void)
+{
+       return soc_is_qca9533();
+}
+
 static inline int soc_is_qca9556(void)
 {
        return ath79_soc == ATH79_SOC_QCA9556;
@@ -115,6 +128,26 @@ static inline int soc_is_qca955x(void)
        return soc_is_qca9556() || soc_is_qca9558();
 }
 
+static inline int soc_is_tp9343(void)
+{
+       return ath79_soc == ATH79_SOC_TP9343;
+}
+
+static inline int soc_is_qca9561(void)
+{
+       return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca9563(void)
+{
+       return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca956x(void)
+{
+       return soc_is_qca9561() || soc_is_qca9563();
+}
+
 void ath79_ddr_wb_flush(unsigned int reg);
 void ath79_ddr_set_pci_windows(void);
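
Typical use of the new SoC predicates — a hypothetical call site mirroring how the existing soc_is_ar934x() users look; both FLUSH macros are defined in the ar71xx_regs.h diff above:

	static void example_wmac_flush(void)
	{
		if (soc_is_qca953x())
			ath79_ddr_wb_flush(QCA953X_DDR_REG_FLUSH_WMAC);
		else if (soc_is_ar934x())
			ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
	}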
 
@@ -134,6 +167,7 @@ static inline u32 ath79_pll_rr(unsigned reg)
 static inline void ath79_reset_wr(unsigned reg, u32 val)
 {
        __raw_writel(val, ath79_reset_base + reg);
+       (void) __raw_readl(ath79_reset_base + reg); /* flush */
 }
 
 static inline u32 ath79_reset_rr(unsigned reg)
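
The read-back added to ath79_reset_wr() above is the classic posted-write flush: the AHB/APB bridge may buffer the store, and a load from the same block stalls the CPU until the write has actually landed. The idiom in isolation, as a hypothetical helper:

	static inline void write_and_flush(void __iomem *base, unsigned int reg,
					   u32 val)
	{
		__raw_writel(val, base + reg);
		(void)__raw_readl(base + reg);	/* drain the posted write */
	}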
index 0089a740e5aed1a17fab5c247a568f4e5100b09b..026ad90c8ac00330196bea2aa4a55d5b5ce63d9d 100644 (file)
@@ -36,6 +36,7 @@
 #define cpu_has_mdmx           0
 #define cpu_has_mips3d         0
 #define cpu_has_smartmips      0
+#define cpu_has_rixi           0
 
 #define cpu_has_mips32r1       1
 #define cpu_has_mips32r2       1
@@ -43,6 +44,7 @@
 #define cpu_has_mips64r2       0
 
 #define cpu_has_mipsmt         0
+#define cpu_has_userlocal      0
 
 #define cpu_has_64bits         0
 #define cpu_has_64bit_zero_reg 0
@@ -51,5 +53,9 @@
 
 #define cpu_dcache_line_size() 32
 #define cpu_icache_line_size() 32
+#define cpu_has_vtag_icache    0
+#define cpu_has_dc_aliases     1
+#define cpu_has_ic_fills_f_dc  0
+#define cpu_has_pindexed_dcache        0
 
 #endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
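
These overrides take effect because asm/cpu-features.h only falls back to a runtime probe when a platform has not pinned the macro; a simplified sketch of the assumed pattern (not part of this patch):

	/* asm/cpu-features.h, pattern simplified */
	#ifndef cpu_has_dc_aliases
	#define cpu_has_dc_aliases	(cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
	#endif

	/* ath79 pins cpu_has_dc_aliases to 1 above, so the alias handling
	 * compiles in unconditionally and the runtime test disappears. */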
diff --git a/arch/mips/include/asm/mach-bmips/dma-coherence.h b/arch/mips/include/asm/mach-bmips/dma-coherence.h
deleted file mode 100644 (file)
index d29781f..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_MACH_BMIPS_DMA_COHERENCE_H
-#define __ASM_MACH_BMIPS_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-#include <asm/cpu-type.h>
-#include <asm/cpu.h>
-
-struct device;
-
-extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
-extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
-extern unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr);
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 0;
-}
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
deleted file mode 100644 (file)
index 6eb1ee5..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- *
- * Similar to mach-generic/dma-coherence.h except
- * plat_device_is_coherent hard coded to return 1.
- *
- */
-#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-
-#include <linux/bug.h>
-
-struct device;
-
-extern void octeon_pci_dma_init(void);
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       BUG();
-       return 0;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       BUG();
-       return 0;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       BUG();
-       return 0;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       BUG();
-       return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-       if (!dev->dma_mask)
-               return false;
-
-       return addr + size - 1 <= *dev->dma_mask;
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-struct dma_map_ops;
-extern const struct dma_map_ops *octeon_pci_dma_map_ops;
-extern char *octeon_swiotlb;
-
-#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
deleted file mode 100644 (file)
index 8ad7a40..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
-#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       return virt_to_phys(addr);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       return page_to_phys(page);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return dma_addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_PERDEV_COHERENT
-       return dev->archdata.dma_coherent;
-#else
-       switch (coherentio) {
-       default:
-       case IO_COHERENCE_DEFAULT:
-               return hw_coherentio;
-       case IO_COHERENCE_ENABLED:
-               return 1;
-       case IO_COHERENCE_DISABLED:
-               return 0;
-       }
-#endif
-}
-
-#ifndef plat_post_dma_flush
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-#endif
-
-#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
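
For context on the 24-bit cutoff repeated in these deleted plat_dma_supported() copies: DMA_BIT_MASK(24) is 0x00ffffff, i.e. the 16 MiB ISA-era GFP_DMA zone, so any device unable to address at least that range was refused. Assuming the stock definition from <linux/dma-mapping.h>:

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	/* DMA_BIT_MASK(24) == 0x0000000000ffffff; a device advertising
	 * DMA_BIT_MASK(16) trips the "mask < DMA_BIT_MASK(24)" check, so
	 * plat_dma_supported() returned 0 for it. */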
index 74207c7bd00d9b51c98991e83d3d865cc51767bc..649a98338886cd9f27cc4091df8e4ecc52b6a62c 100644 (file)
@@ -2,8 +2,7 @@
 #ifndef __ASM_MACH_GENERIC_KMALLOC_H
 #define __ASM_MACH_GENERIC_KMALLOC_H
 
-
-#ifndef CONFIG_DMA_COHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
 /*
  * Total overkill for most systems but need as a safe default.
  * Set this one if any device in the system might do non-coherent DMA.
index 952b0fdfda0e637849315f534043137f10972414..ee5ebe98f6cfb6b7104beff6510983266c1b685d 100644 (file)
 /*
  * This gives the physical RAM offset.
  */
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET            _AC(0, UL)
-#endif
+#ifndef __ASSEMBLY__
+# if defined(CONFIG_MIPS_AUTO_PFN_OFFSET)
+#  define PHYS_OFFSET          ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
+# elif !defined(PHYS_OFFSET)
+#  define PHYS_OFFSET          _AC(0, UL)
+# endif
+#endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_KVM_GUEST
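
Under the usual definitions, the new CONFIG_MIPS_AUTO_PFN_OFFSET branch is just a shift of the page-frame offset; a worked example (PFN_PHYS() as defined in <linux/pfn.h> is an assumption here, not shown in this diff):

	#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)

	/* with ARCH_PFN_OFFSET = 0x8000 and PAGE_SHIFT = 12:
	 * PHYS_OFFSET = 0x8000 << 12 = 0x08000000 — the same value the
	 * PIC32MZDA hunk below still hard-codes. */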
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
deleted file mode 100644 (file)
index 04d8620..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
-#define __ASM_MACH_IP27_DMA_COHERENCE_H
-
-#include <asm/pci/bridge.h>
-
-#define pdev_to_baddr(pdev, addr) \
-       (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
-#define dev_to_baddr(dev, addr) \
-       pdev_to_baddr(to_pci_dev(dev), (addr))
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
-
-       return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
-
-       return pa;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return dma_addr & ~(0xffUL << 56);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 1;               /* IP27 non-coherent mode is unsupported */
-}
-
-#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
deleted file mode 100644 (file)
index 7bdf212..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
-#define __ASM_MACH_IP32_DMA_COHERENCE_H
-
-#include <asm/ip32/crime.h>
-
-struct device;
-
-/*
- * Few notes.
- * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
- * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
- *    native-endian)
- * 3. All other devices see memory as one big chunk at 0x40000000
- * 4. Non-PCI devices will pass NULL as struct device*
- *
- * Thus we translate differently, depending on device.
- */
-
-#define RAM_OFFSET_MASK 0x3fffffffUL
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
-
-       if (dev == NULL)
-               pa += CRIME_HI_MEM_BASE;
-
-       return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       dma_addr_t pa;
-
-       pa = page_to_phys(page) & RAM_OFFSET_MASK;
-
-       if (dev == NULL)
-               pa += CRIME_HI_MEM_BASE;
-
-       return pa;
-}
-
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       unsigned long addr = dma_addr & RAM_OFFSET_MASK;
-
-       if (dma_addr >= 256*1024*1024)
-               addr += CRIME_HI_MEM_BASE;
-
-       return addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 0;               /* IP32 is non-coherent */
-}
-
-#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
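
A worked example of the translation the deleted IP32 helpers performed, assuming CRIME_HI_MEM_BASE = 0x40000000 (consistent with the "one big chunk at 0x40000000" note above):

	/* CPU physical 0x48000000 & RAM_OFFSET_MASK (0x3fffffff) = 0x08000000:
	 *  - a PCI device DMAs to 0x08000000;
	 *  - a non-PCI device (dev == NULL) gets 0x08000000 + 0x40000000
	 *    = 0x48000000, the CRIME high-memory alias.
	 * plat_dma_addr_to_phys() reversed the non-PCI case: 0x48000000 is
	 * >= 256 MiB, so CRIME_HI_MEM_BASE was added back after masking. */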
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
deleted file mode 100644 (file)
index dc347c2..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
-#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
-
-#include <asm/jazzdma.h>
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
-       return vdma_alloc(virt_to_phys(addr), size);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       return vdma_alloc(page_to_phys(page), PAGE_SIZE);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return vdma_log2phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-       vdma_free(dma_addr);
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 0;
-}
-
-#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
deleted file mode 100644 (file)
index 64fc44d..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 07  Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
- * Author: Fuxin Zhang, zhangfx@lemote.com
- *
- */
-#ifndef __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-#define __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-
-#ifdef CONFIG_SWIOTLB
-#include <linux/swiotlb.h>
-#endif
-
-struct device;
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-       if (!dev->dma_mask)
-               return false;
-
-       return addr + size - 1 <= *dev->dma_mask;
-}
-
-extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-                                         size_t size)
-{
-#ifdef CONFIG_CPU_LOONGSON3
-       return __phys_to_dma(dev, virt_to_phys(addr));
-#else
-       return virt_to_phys(addr) | 0x80000000;
-#endif
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-                                              struct page *page)
-{
-#ifdef CONFIG_CPU_LOONGSON3
-       return __phys_to_dma(dev, page_to_phys(page));
-#else
-       return page_to_phys(page) | 0x80000000;
-#endif
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
-       return __dma_to_phys(dev, dma_addr);
-#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
-       return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
-#else
-       return dma_addr & 0x7fffffff;
-#endif
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_NONCOHERENT
-       return 0;
-#else
-       return 1;
-#endif /* CONFIG_DMA_NONCOHERENT */
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_LOONGSON64_DMA_COHERENCE_H */
index 8393bc548987d782b9f590b0a9ffecda9fabc61c..312739117bb02e3cf61b4c6817a8b9a97a0682c7 100644 (file)
        .set    push
        .set    mips64
        /* Set LPA on LOONGSON3 config3 */
-       mfc0    t0, $16, 3
+       mfc0    t0, CP0_CONFIG3
        or      t0, (0x1 << 7)
-       mtc0    t0, $16, 3
+       mtc0    t0, CP0_CONFIG3
        /* Set ELPA on LOONGSON3 pagegrain */
-       mfc0    t0, $5, 1
+       mfc0    t0, CP0_PAGEGRAIN
        or      t0, (0x1 << 29)
-       mtc0    t0, $5, 1
+       mtc0    t0, CP0_PAGEGRAIN
 #ifdef CONFIG_LOONGSON3_ENHANCEMENT
        /* Enable STFill Buffer */
-       mfc0    t0, $16, 6
+       mfc0    t0, CP0_CONFIG6
        or      t0, 0x100
-       mtc0    t0, $16, 6
+       mtc0    t0, CP0_CONFIG6
 #endif
        _ehb
        .set    pop
        .set    push
        .set    mips64
        /* Set LPA on LOONGSON3 config3 */
-       mfc0    t0, $16, 3
+       mfc0    t0, CP0_CONFIG3
        or      t0, (0x1 << 7)
-       mtc0    t0, $16, 3
+       mtc0    t0, CP0_CONFIG3
        /* Set ELPA on LOONGSON3 pagegrain */
-       mfc0    t0, $5, 1
+       mfc0    t0, CP0_PAGEGRAIN
        or      t0, (0x1 << 29)
-       mtc0    t0, $5, 1
+       mtc0    t0, CP0_PAGEGRAIN
 #ifdef CONFIG_LOONGSON3_ENHANCEMENT
        /* Enable STFill Buffer */
-       mfc0    t0, $16, 6
+       mfc0    t0, CP0_CONFIG6
        or      t0, 0x100
-       mtc0    t0, $16, 6
+       mtc0    t0, CP0_CONFIG6
 #endif
        _ehb
        .set    pop
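
The rewrite above is neutral at the instruction level: the CP0_* mnemonics added to mipsregs.h (next file in this diff) expand to the same register/select pairs the raw numbers encoded, e.g.:

	/* from the mipsregs.h hunk below: */
	#define CP0_PAGEGRAIN $5, 1
	#define CP0_CONFIG3 $16, 3
	#define CP0_CONFIG6 $16, 6

	/* so "mfc0 t0, CP0_CONFIG3" assembles identically to
	 * "mfc0 t0, $16, 3" — only the source becomes self-describing. */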
index 046a0a9aa8b3d8fc77cc032003bf8fce80a14c55..a1b9783b76eaff35926b41b44fbd0cb3594d899a 100644 (file)
@@ -16,7 +16,6 @@
 
 #ifdef CONFIG_PIC32MZDA
 #define PHYS_OFFSET    _AC(0x08000000, UL)
-#define UNCAC_BASE     _AC(0xa8000000, UL)
 #endif
 
 #include <asm/mach-generic/spaces.h>
index ae461d91cd1faef06dc39399e7910eb28471d930..01df9ad62fb83d3b8e50006b342a6f182f6fc862 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/linkage.h>
 #include <linux/types.h>
 #include <asm/hazards.h>
+#include <asm/isa-rev.h>
 #include <asm/war.h>
 
 /*
@@ -51,6 +52,7 @@
 #define CP0_GLOBALNUMBER $3, 1
 #define CP0_CONTEXT $4
 #define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
 #define CP0_SEGCTL0 $5, 2
 #define CP0_SEGCTL1 $5, 3
 #define CP0_SEGCTL2 $5, 4
@@ -77,6 +79,7 @@
 #define CP0_CONFIG $16
 #define CP0_CONFIG3 $16, 3
 #define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
 #define CP0_LLADDR $17
 #define CP0_WATCHLO $18
 #define CP0_WATCHHI $19
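
These CP0_PAGEGRAIN and CP0_CONFIG6 additions join the existing
register/select macros, which is what lets the Loongson entry code
earlier in this diff write "mfc0 t0, CP0_CONFIG3" instead of the raw
"mfc0 t0, $16, 3". The only subtlety is that each macro expands to two
operand tokens at once. A small C demonstration of that expansion,
using hypothetical stand-in tokens:

    #include <stdio.h>

    /* Stand-in for the real "#define CP0_PAGEGRAIN $5, 1": the kernel
     * macro only means something as an mfc0/mtc0 operand in .S files
     * that are run through the C preprocessor. */
    #define CP0_PAGEGRAIN reg5, sel1

    #define STR_(...) #__VA_ARGS__
    #define STR(...)  STR_(__VA_ARGS__)

    int main(void)
    {
            /* Prints "reg5, sel1": one macro, two operand tokens,
             * exactly how "mfc0 t0, CP0_PAGEGRAIN" ends up as
             * "mfc0 t0, $5, 1". */
            printf("%s\n", STR(CP0_PAGEGRAIN));
            return 0;
    }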
@@ -1481,32 +1484,38 @@ do {                                                                    \
 
 #define __write_64bit_c0_split(source, sel, val)                       \
 do {                                                                   \
-       unsigned long long __tmp;                                       \
+       unsigned long long __tmp = (val);                               \
        unsigned long __flags;                                          \
                                                                        \
        local_irq_save(__flags);                                        \
-       if (sel == 0)                                                   \
+       if (MIPS_ISA_REV >= 2)                                          \
+               __asm__ __volatile__(                                   \
+                       ".set\tpush\n\t"                                \
+                       ".set\t" MIPS_ISA_LEVEL "\n\t"                  \
+                       "dins\t%L0, %M0, 32, 32\n\t"                    \
+                       "dmtc0\t%L0, " #source ", " #sel "\n\t"         \
+                       ".set\tpop"                                     \
+                       : "+r" (__tmp));                                \
+       else if (sel == 0)                                              \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
-                       "dsll\t%L0, %L1, 32\n\t"                        \
+                       "dsll\t%L0, %L0, 32\n\t"                        \
                        "dsrl\t%L0, %L0, 32\n\t"                        \
-                       "dsll\t%M0, %M1, 32\n\t"                        \
+                       "dsll\t%M0, %M0, 32\n\t"                        \
                        "or\t%L0, %L0, %M0\n\t"                         \
                        "dmtc0\t%L0, " #source "\n\t"                   \
                        ".set\tmips0"                                   \
-                       : "=&r,r" (__tmp)                               \
-                       : "r,0" (val));                                 \
+                       : "+r" (__tmp));                                \
        else                                                            \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
-                       "dsll\t%L0, %L1, 32\n\t"                        \
+                       "dsll\t%L0, %L0, 32\n\t"                        \
                        "dsrl\t%L0, %L0, 32\n\t"                        \
-                       "dsll\t%M0, %M1, 32\n\t"                        \
+                       "dsll\t%M0, %M0, 32\n\t"                        \
                        "or\t%L0, %L0, %M0\n\t"                         \
                        "dmtc0\t%L0, " #source ", " #sel "\n\t"         \
                        ".set\tmips0"                                   \
-                       : "=&r,r" (__tmp)                               \
-                       : "r,0" (val));                                 \
+                       : "+r" (__tmp));                                \
        local_irq_restore(__flags);                                     \
 } while (0)
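
Two things change in __write_64bit_c0_split() above: __tmp is now
initialised to the value, so a single read-write "+r" operand replaces
the old "=&r,r"/"r,0" constraint pair, and on MIPS r2 and later a
single dins copies the high half into bits 63:32 of the low register
before the dmtc0, instead of the four-instruction shift-and-or dance.
A user-space C sketch of the merge the pre-r2 path still performs,
treating the value as explicit 32-bit halves:

    #include <assert.h>
    #include <stdint.h>

    /* Merge low/high halves the way the pre-r2 asm path does: the
     * dsll/dsrl pair zero-extends the low half, then the high half is
     * shifted up and OR'd in. */
    static uint64_t merge_halves(uint32_t lo, uint32_t hi)
    {
            uint64_t val = (uint64_t)lo;

            val |= (uint64_t)hi << 32;
            return val;
    }

    int main(void)
    {
            assert(merge_halves(0xdeadbeef, 0x00c0ffee) ==
                   0x00c0ffeedeadbeefull);
            return 0;
    }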
 
index da2004cef2d5c8a4f8291647e72dbfd7b3d91b7c..b509371a6b0cce4842ab634c80c658e35d245db6 100644
@@ -126,8 +126,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
-       atomic_set(&mm->context.fp_mode_switching, 0);
-
        mm->context.bd_emupage_allocmap = NULL;
        spin_lock_init(&mm->context.bd_emupage_lock);
        init_waitqueue_head(&mm->context.bd_emupage_queue);
index 5604db3d18362bd4585f8d721c63466647210661..d79c68fa78d9bbd0b2263180dfbf2244e13f2735 100644
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
        for (i = 0; i < 8; i++) {
                nlm_msgsnd(dest);
                status = nlm_read_c2_status0();
-               if ((status & 0x2) == 1)
-                       pr_info("Send pending fail!\n");
                if ((status & 0x4) == 0)
                        return 0;
        }
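
The check dropped from nlm_fmn_send() above could never fire:
(status & 0x2) evaluates to either 0 or 2, so comparing it against 1
is always false and the pr_info() was dead code. A minimal sketch of
the pattern, assuming the intent was to test bit 1:

    #include <stdio.h>

    int main(void)
    {
            unsigned int status = 0x2;

            /* The removed form: (status & 0x2) is 0 or 2, never 1. */
            if ((status & 0x2) == 1)
                    printf("never printed\n");

            /* Testing the bit directly is what such checks intend. */
            if (status & 0x2)
                    printf("bit 1 is set\n");
            return 0;
    }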
index a1e21a3854cff2d3e33a347251def607996d2a57..1eef155979f3c75dae8f376c3c2deeb461dd46cc 100644
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -55,6 +55,8 @@
 #define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
 #define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
 
+void __cvmx_interrupt_asxx_enable(int block);
+
 union cvmx_asxx_gmii_rx_clk_set {
        uint64_t u64;
        struct cvmx_asxx_gmii_rx_clk_set_s {
index 6e61792d9248b4af91b0e64978205c463a24aa49..1d18be8cdddc0b5346319e28b11052a2b2f24cb6 100644
-/***********************license start***************
- * Author: Cavium Networks
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon CIU definitions
  *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ */
 
 #ifndef __CVMX_CIU_DEFS_H__
 #define __CVMX_CIU_DEFS_H__
 
-#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
-#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
-#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
-#define CVMX_CIU_EN2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
-#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
-#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
-#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
-#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
-#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
-static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+#include <asm/bitfield.h>
+
+#define CVMX_CIU_ADDR(addr, coreid, coremask, offset)                         \
+       (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) +                  \
+       (((coreid) & (coremask)) * offset))
+
+#define CVMX_CIU_EN2_PPX_IP4(c)                CVMX_CIU_ADDR(0xA400, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1C(c)    CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1S(c)    CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8)
+#define CVMX_CIU_FUSE                  CVMX_CIU_ADDR(0x0728, 0, 0x00, 0)
+#define CVMX_CIU_INT_SUM1              CVMX_CIU_ADDR(0x0108, 0, 0x00, 0)
+#define CVMX_CIU_INTX_EN0(c)           CVMX_CIU_ADDR(0x0200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1C(c)       CVMX_CIU_ADDR(0x2200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1S(c)       CVMX_CIU_ADDR(0x6200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1(c)           CVMX_CIU_ADDR(0x0208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1C(c)       CVMX_CIU_ADDR(0x2208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1S(c)       CVMX_CIU_ADDR(0x6208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_SUM0(c)          CVMX_CIU_ADDR(0x0000, c, 0x3F, 8)
+#define CVMX_CIU_NMI                   CVMX_CIU_ADDR(0x0718, 0, 0x00, 0)
+#define CVMX_CIU_PCI_INTA              CVMX_CIU_ADDR(0x0750, 0, 0x00, 0)
+#define CVMX_CIU_PP_BIST_STAT          CVMX_CIU_ADDR(0x07E0, 0, 0x00, 0)
+#define CVMX_CIU_PP_DBG                        CVMX_CIU_ADDR(0x0708, 0, 0x00, 0)
+#define CVMX_CIU_PP_RST                        CVMX_CIU_ADDR(0x0700, 0, 0x00, 0)
+#define CVMX_CIU_QLM0                  CVMX_CIU_ADDR(0x0780, 0, 0x00, 0)
+#define CVMX_CIU_QLM1                  CVMX_CIU_ADDR(0x0788, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGC              CVMX_CIU_ADDR(0x0768, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGD              CVMX_CIU_ADDR(0x0770, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_BIST             CVMX_CIU_ADDR(0x0738, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST1            CVMX_CIU_ADDR(0x0758, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST             CVMX_CIU_ADDR(0x0748, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_RST              CVMX_CIU_ADDR(0x0740, 0, 0x00, 0)
+#define CVMX_CIU_SUM2_PPX_IP4(c)       CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8)
+#define CVMX_CIU_TIM_MULTI_CAST                CVMX_CIU_ADDR(0xC200, 0, 0x00, 0)
+#define CVMX_CIU_TIMX(c)               CVMX_CIU_ADDR(0x0480, c, 0x0F, 8)
+
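
The new CVMX_CIU_ADDR() collapses dozens of per-register
CVMX_ADD_IO_SEG() expressions into one formula: the CIU base plus the
register's offset, plus (coreid & coremask) * stride for the per-core
arrays (a zero mask for the fixed registers). A worked sketch of the
offset arithmetic, leaving out the CVMX_ADD_IO_SEG() segment
translation the real macro applies on top:

    #include <assert.h>
    #include <stdint.h>

    #define CIU_BASE 0x0001070000000000ull

    /* Same shape as CVMX_CIU_ADDR(): register offset into the CIU
     * block, plus a per-core stride for the replicated registers. */
    static uint64_t ciu_addr(uint64_t reg, unsigned int coreid,
                             unsigned int coremask, unsigned int stride)
    {
            return CIU_BASE + reg + (uint64_t)(coreid & coremask) * stride;
    }

    int main(void)
    {
            /* CVMX_CIU_INTX_EN0(2): 0x0200 plus two 16-byte strides. */
            assert(ciu_addr(0x0200, 2, 0x3F, 16) == CIU_BASE + 0x0220);

            /* Fixed registers pass a zero mask, e.g. CVMX_CIU_FUSE. */
            assert(ciu_addr(0x0728, 5, 0x00, 0) == CIU_BASE + 0x0728);
            return 0;
    }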
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned int coreid)
 {
-       switch (cvmx_get_octeon_family()) {
-       case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
-       case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070100100600ull) + (offset) * 8;
-       }
-       return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
+       if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+               return CVMX_CIU_ADDR(0x100100600, coreid, 0x0F, 8);
+       else
+               return CVMX_CIU_ADDR(0x000000680, coreid, 0x0F, 8);
 }
 
-static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned int coreid)
 {
-       switch (cvmx_get_octeon_family()) {
-       case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
-       case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070100100400ull) + (offset) * 8;
-       }
-       return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
+       if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+               return CVMX_CIU_ADDR(0x100100400, coreid, 0x0F, 8);
+       else
+               return CVMX_CIU_ADDR(0x000000600, coreid, 0x0F, 8);
 }
 
-#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
-#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
-#define CVMX_CIU_PP_BIST_STAT (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
-static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned int coreid)
 {
        switch (cvmx_get_octeon_family()) {
-       case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
-       case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
        case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8;
+               return CVMX_CIU_ADDR(0x100100200, coreid, 0x0F, 8);
        case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
        case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
        case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8;
+               return CVMX_CIU_ADDR(0x000030000, coreid, 0x0F, 8) -
+                       0x60000000000ull;
+       default:
+               return CVMX_CIU_ADDR(0x000000580, coreid, 0x0F, 8);
        }
-       return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
 }
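
CVMX_CIU_PP_POKEX() above (and CVMX_CIU_WDOGX() below) keep a switch
only for the families whose CIU actually moved: CN68XX relocated the
block, and the CN73/75/78 parts live in a different I/O segment (hence
the 0x60000000000 adjustment), while every older family now falls
through to one default case instead of being enumerated. A sketch of
that shape with hypothetical stand-in family tags:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins; the real code switches on cvmx_get_octeon_family(). */
    enum family { FAM_CN68XX, FAM_CN7XXX, FAM_OTHER };

    static uint64_t pp_pokex_reg(enum family fam)
    {
            switch (fam) {
            case FAM_CN68XX:
                    return 0x100100200ull;  /* relocated CIU block */
            case FAM_CN7XXX:
                    return 0x000030000ull;  /* different I/O segment */
            default:
                    return 0x000000580ull;  /* all older families */
            }
    }

    int main(void)
    {
            assert(pp_pokex_reg(FAM_OTHER) == 0x580);
            return 0;
    }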
 
-#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
-#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
-#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
-#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
-#define CVMX_CIU_QLM3 (CVMX_ADD_IO_SEG(0x0001070000000798ull))
-#define CVMX_CIU_QLM4 (CVMX_ADD_IO_SEG(0x00010700000007A0ull))
-#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
-#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
-#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
-#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
-#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
-#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
-#define CVMX_CIU_SOFT_PRST2 (CVMX_ADD_IO_SEG(0x00010700000007D8ull))
-#define CVMX_CIU_SOFT_PRST3 (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
-#define CVMX_CIU_SUM1_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM1_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIM_MULTI_CAST (CVMX_ADD_IO_SEG(0x000107000000C200ull))
-static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+static inline uint64_t CVMX_CIU_WDOGX(unsigned int coreid)
 {
        switch (cvmx_get_octeon_family()) {
-       case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
-       case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
-       case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
        case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8;
+               return CVMX_CIU_ADDR(0x100100000, coreid, 0x0F, 8);
        case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
        case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
        case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
-               return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8;
+               return CVMX_CIU_ADDR(0x000020000, coreid, 0x0F, 8) -
+                       0x60000000000ull;
+       default:
+               return CVMX_CIU_ADDR(0x000000500, coreid, 0x0F, 8);
        }
-       return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
 }
 
-union cvmx_ciu_bist {
-       uint64_t u64;
-       struct cvmx_ciu_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_7_63:57;
-               uint64_t bist:7;
-#else
-               uint64_t bist:7;
-               uint64_t reserved_7_63:57;
-#endif
-       } s;
-       struct cvmx_ciu_bist_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t bist:4;
-#else
-               uint64_t bist:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_bist_cn30xx cn31xx;
-       struct cvmx_ciu_bist_cn30xx cn38xx;
-       struct cvmx_ciu_bist_cn30xx cn38xxp2;
-       struct cvmx_ciu_bist_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t bist:2;
-#else
-               uint64_t bist:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn50xx;
-       struct cvmx_ciu_bist_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_3_63:61;
-               uint64_t bist:3;
-#else
-               uint64_t bist:3;
-               uint64_t reserved_3_63:61;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_bist_cn52xx cn52xxp1;
-       struct cvmx_ciu_bist_cn30xx cn56xx;
-       struct cvmx_ciu_bist_cn30xx cn56xxp1;
-       struct cvmx_ciu_bist_cn30xx cn58xx;
-       struct cvmx_ciu_bist_cn30xx cn58xxp1;
-       struct cvmx_ciu_bist_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t bist:6;
-#else
-               uint64_t bist:6;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_bist_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_5_63:59;
-               uint64_t bist:5;
-#else
-               uint64_t bist:5;
-               uint64_t reserved_5_63:59;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_bist_cn63xx cn63xxp1;
-       struct cvmx_ciu_bist_cn61xx cn66xx;
-       struct cvmx_ciu_bist_s cn68xx;
-       struct cvmx_ciu_bist_s cn68xxp1;
-       struct cvmx_ciu_bist_cn61xx cnf71xx;
-};
-
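
Every register union deleted from here on follows one template: a raw
u64 view overlaid with per-chip bitfield structs whose members are
declared in opposite order under __BIG_ENDIAN_BITFIELD, so the same
field name lands on the same register bits on either endianness.
Bitfield layout is compiler-specific; the sketch below mirrors the
kernel's convention and asserts the little-endian case:

    #include <assert.h>
    #include <stdint.h>

    /* Same layout trick as the removed cvmx_ciu_* unions: "bist"
     * always occupies register bits 6:0, whichever branch is built. */
    union ciu_bist {
            uint64_t u64;
            struct {
    #ifdef __BIG_ENDIAN_BITFIELD
                    uint64_t reserved_7_63:57;
                    uint64_t bist:7;
    #else
                    uint64_t bist:7;
                    uint64_t reserved_7_63:57;
    #endif
            } s;
    };

    int main(void)
    {
            union ciu_bist r = { .u64 = 0x55 };

            /* Little-endian hosts: the low seven bits are "bist". */
            assert(r.s.bist == 0x55);
            return 0;
    }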
-union cvmx_ciu_block_int {
-       uint64_t u64;
-       struct cvmx_ciu_block_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_62_63:2;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_43_59:17;
-               uint64_t ptp:1;
-               uint64_t dpi:1;
-               uint64_t dfm:1;
-               uint64_t reserved_34_39:6;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_31_31:1;
-               uint64_t iob:1;
-               uint64_t reserved_29_29:1;
-               uint64_t agl:1;
-               uint64_t reserved_27_27:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t reserved_24_24:1;
-               uint64_t asxpcs1:1;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_21_21:1;
-               uint64_t pip:1;
-               uint64_t reserved_18_19:2;
-               uint64_t lmc0:1;
-               uint64_t l2c:1;
-               uint64_t reserved_15_15:1;
-               uint64_t rad:1;
-               uint64_t usb:1;
-               uint64_t pow:1;
-               uint64_t tim:1;
-               uint64_t pko:1;
-               uint64_t ipd:1;
-               uint64_t reserved_8_8:1;
-               uint64_t zip:1;
-               uint64_t dfa:1;
-               uint64_t fpa:1;
-               uint64_t key:1;
-               uint64_t sli:1;
-               uint64_t gmx1:1;
-               uint64_t gmx0:1;
-               uint64_t mio:1;
-#else
-               uint64_t mio:1;
-               uint64_t gmx0:1;
-               uint64_t gmx1:1;
-               uint64_t sli:1;
-               uint64_t key:1;
-               uint64_t fpa:1;
-               uint64_t dfa:1;
-               uint64_t zip:1;
-               uint64_t reserved_8_8:1;
-               uint64_t ipd:1;
-               uint64_t pko:1;
-               uint64_t tim:1;
-               uint64_t pow:1;
-               uint64_t usb:1;
-               uint64_t rad:1;
-               uint64_t reserved_15_15:1;
-               uint64_t l2c:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_18_19:2;
-               uint64_t pip:1;
-               uint64_t reserved_21_21:1;
-               uint64_t asxpcs0:1;
-               uint64_t asxpcs1:1;
-               uint64_t reserved_24_24:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_27_27:1;
-               uint64_t agl:1;
-               uint64_t reserved_29_29:1;
-               uint64_t iob:1;
-               uint64_t reserved_31_31:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t reserved_34_39:6;
-               uint64_t dfm:1;
-               uint64_t dpi:1;
-               uint64_t ptp:1;
-               uint64_t reserved_43_59:17;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_63:2;
-#endif
-       } s;
-       struct cvmx_ciu_block_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_43_63:21;
-               uint64_t ptp:1;
-               uint64_t dpi:1;
-               uint64_t reserved_31_40:10;
-               uint64_t iob:1;
-               uint64_t reserved_29_29:1;
-               uint64_t agl:1;
-               uint64_t reserved_27_27:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t reserved_24_24:1;
-               uint64_t asxpcs1:1;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_21_21:1;
-               uint64_t pip:1;
-               uint64_t reserved_18_19:2;
-               uint64_t lmc0:1;
-               uint64_t l2c:1;
-               uint64_t reserved_15_15:1;
-               uint64_t rad:1;
-               uint64_t usb:1;
-               uint64_t pow:1;
-               uint64_t tim:1;
-               uint64_t pko:1;
-               uint64_t ipd:1;
-               uint64_t reserved_8_8:1;
-               uint64_t zip:1;
-               uint64_t dfa:1;
-               uint64_t fpa:1;
-               uint64_t key:1;
-               uint64_t sli:1;
-               uint64_t gmx1:1;
-               uint64_t gmx0:1;
-               uint64_t mio:1;
-#else
-               uint64_t mio:1;
-               uint64_t gmx0:1;
-               uint64_t gmx1:1;
-               uint64_t sli:1;
-               uint64_t key:1;
-               uint64_t fpa:1;
-               uint64_t dfa:1;
-               uint64_t zip:1;
-               uint64_t reserved_8_8:1;
-               uint64_t ipd:1;
-               uint64_t pko:1;
-               uint64_t tim:1;
-               uint64_t pow:1;
-               uint64_t usb:1;
-               uint64_t rad:1;
-               uint64_t reserved_15_15:1;
-               uint64_t l2c:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_18_19:2;
-               uint64_t pip:1;
-               uint64_t reserved_21_21:1;
-               uint64_t asxpcs0:1;
-               uint64_t asxpcs1:1;
-               uint64_t reserved_24_24:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_27_27:1;
-               uint64_t agl:1;
-               uint64_t reserved_29_29:1;
-               uint64_t iob:1;
-               uint64_t reserved_31_40:10;
-               uint64_t dpi:1;
-               uint64_t ptp:1;
-               uint64_t reserved_43_63:21;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_block_int_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_43_63:21;
-               uint64_t ptp:1;
-               uint64_t dpi:1;
-               uint64_t dfm:1;
-               uint64_t reserved_34_39:6;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_31_31:1;
-               uint64_t iob:1;
-               uint64_t reserved_29_29:1;
-               uint64_t agl:1;
-               uint64_t reserved_27_27:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t reserved_23_24:2;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_21_21:1;
-               uint64_t pip:1;
-               uint64_t reserved_18_19:2;
-               uint64_t lmc0:1;
-               uint64_t l2c:1;
-               uint64_t reserved_15_15:1;
-               uint64_t rad:1;
-               uint64_t usb:1;
-               uint64_t pow:1;
-               uint64_t tim:1;
-               uint64_t pko:1;
-               uint64_t ipd:1;
-               uint64_t reserved_8_8:1;
-               uint64_t zip:1;
-               uint64_t dfa:1;
-               uint64_t fpa:1;
-               uint64_t key:1;
-               uint64_t sli:1;
-               uint64_t reserved_2_2:1;
-               uint64_t gmx0:1;
-               uint64_t mio:1;
-#else
-               uint64_t mio:1;
-               uint64_t gmx0:1;
-               uint64_t reserved_2_2:1;
-               uint64_t sli:1;
-               uint64_t key:1;
-               uint64_t fpa:1;
-               uint64_t dfa:1;
-               uint64_t zip:1;
-               uint64_t reserved_8_8:1;
-               uint64_t ipd:1;
-               uint64_t pko:1;
-               uint64_t tim:1;
-               uint64_t pow:1;
-               uint64_t usb:1;
-               uint64_t rad:1;
-               uint64_t reserved_15_15:1;
-               uint64_t l2c:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_18_19:2;
-               uint64_t pip:1;
-               uint64_t reserved_21_21:1;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_23_24:2;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_27_27:1;
-               uint64_t agl:1;
-               uint64_t reserved_29_29:1;
-               uint64_t iob:1;
-               uint64_t reserved_31_31:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t reserved_34_39:6;
-               uint64_t dfm:1;
-               uint64_t dpi:1;
-               uint64_t ptp:1;
-               uint64_t reserved_43_63:21;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_block_int_cn63xx cn63xxp1;
-       struct cvmx_ciu_block_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_62_63:2;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_43_59:17;
-               uint64_t ptp:1;
-               uint64_t dpi:1;
-               uint64_t dfm:1;
-               uint64_t reserved_33_39:7;
-               uint64_t srio0:1;
-               uint64_t reserved_31_31:1;
-               uint64_t iob:1;
-               uint64_t reserved_29_29:1;
-               uint64_t agl:1;
-               uint64_t reserved_27_27:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t reserved_24_24:1;
-               uint64_t asxpcs1:1;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_21_21:1;
-               uint64_t pip:1;
-               uint64_t reserved_18_19:2;
-               uint64_t lmc0:1;
-               uint64_t l2c:1;
-               uint64_t reserved_15_15:1;
-               uint64_t rad:1;
-               uint64_t usb:1;
-               uint64_t pow:1;
-               uint64_t tim:1;
-               uint64_t pko:1;
-               uint64_t ipd:1;
-               uint64_t reserved_8_8:1;
-               uint64_t zip:1;
-               uint64_t dfa:1;
-               uint64_t fpa:1;
-               uint64_t key:1;
-               uint64_t sli:1;
-               uint64_t gmx1:1;
-               uint64_t gmx0:1;
-               uint64_t mio:1;
-#else
-               uint64_t mio:1;
-               uint64_t gmx0:1;
-               uint64_t gmx1:1;
-               uint64_t sli:1;
-               uint64_t key:1;
-               uint64_t fpa:1;
-               uint64_t dfa:1;
-               uint64_t zip:1;
-               uint64_t reserved_8_8:1;
-               uint64_t ipd:1;
-               uint64_t pko:1;
-               uint64_t tim:1;
-               uint64_t pow:1;
-               uint64_t usb:1;
-               uint64_t rad:1;
-               uint64_t reserved_15_15:1;
-               uint64_t l2c:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_18_19:2;
-               uint64_t pip:1;
-               uint64_t reserved_21_21:1;
-               uint64_t asxpcs0:1;
-               uint64_t asxpcs1:1;
-               uint64_t reserved_24_24:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_27_27:1;
-               uint64_t agl:1;
-               uint64_t reserved_29_29:1;
-               uint64_t iob:1;
-               uint64_t reserved_31_31:1;
-               uint64_t srio0:1;
-               uint64_t reserved_33_39:7;
-               uint64_t dfm:1;
-               uint64_t dpi:1;
-               uint64_t ptp:1;
-               uint64_t reserved_43_59:17;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_63:2;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_block_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_43_63:21;
-               uint64_t ptp:1;
-               uint64_t dpi:1;
-               uint64_t reserved_31_40:10;
-               uint64_t iob:1;
-               uint64_t reserved_27_29:3;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t reserved_23_24:2;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_21_21:1;
-               uint64_t pip:1;
-               uint64_t reserved_18_19:2;
-               uint64_t lmc0:1;
-               uint64_t l2c:1;
-               uint64_t reserved_15_15:1;
-               uint64_t rad:1;
-               uint64_t usb:1;
-               uint64_t pow:1;
-               uint64_t tim:1;
-               uint64_t pko:1;
-               uint64_t ipd:1;
-               uint64_t reserved_6_8:3;
-               uint64_t fpa:1;
-               uint64_t key:1;
-               uint64_t sli:1;
-               uint64_t reserved_2_2:1;
-               uint64_t gmx0:1;
-               uint64_t mio:1;
-#else
-               uint64_t mio:1;
-               uint64_t gmx0:1;
-               uint64_t reserved_2_2:1;
-               uint64_t sli:1;
-               uint64_t key:1;
-               uint64_t fpa:1;
-               uint64_t reserved_6_8:3;
-               uint64_t ipd:1;
-               uint64_t pko:1;
-               uint64_t tim:1;
-               uint64_t pow:1;
-               uint64_t usb:1;
-               uint64_t rad:1;
-               uint64_t reserved_15_15:1;
-               uint64_t l2c:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_18_19:2;
-               uint64_t pip:1;
-               uint64_t reserved_21_21:1;
-               uint64_t asxpcs0:1;
-               uint64_t reserved_23_24:2;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_27_29:3;
-               uint64_t iob:1;
-               uint64_t reserved_31_40:10;
-               uint64_t dpi:1;
-               uint64_t ptp:1;
-               uint64_t reserved_43_63:21;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_dint {
-       uint64_t u64;
-       struct cvmx_ciu_dint_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t dint:32;
-#else
-               uint64_t dint:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_dint_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t dint:1;
-#else
-               uint64_t dint:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_dint_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t dint:2;
-#else
-               uint64_t dint:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_dint_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t dint:16;
-#else
-               uint64_t dint:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_dint_cn38xx cn38xxp2;
-       struct cvmx_ciu_dint_cn31xx cn50xx;
-       struct cvmx_ciu_dint_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t dint:4;
-#else
-               uint64_t dint:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_dint_cn52xx cn52xxp1;
-       struct cvmx_ciu_dint_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t dint:12;
-#else
-               uint64_t dint:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_dint_cn56xx cn56xxp1;
-       struct cvmx_ciu_dint_cn38xx cn58xx;
-       struct cvmx_ciu_dint_cn38xx cn58xxp1;
-       struct cvmx_ciu_dint_cn52xx cn61xx;
-       struct cvmx_ciu_dint_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t dint:6;
-#else
-               uint64_t dint:6;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_dint_cn63xx cn63xxp1;
-       struct cvmx_ciu_dint_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t dint:10;
-#else
-               uint64_t dint:10;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_dint_s cn68xx;
-       struct cvmx_ciu_dint_s cn68xxp1;
-       struct cvmx_ciu_dint_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int {
-       uint64_t u64;
-       struct cvmx_ciu_en2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_iox_int_cn61xx cn66xx;
-       struct cvmx_ciu_en2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_en2_iox_int_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_iox_int_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_iox_int_w1c_cn61xx cn66xx;
-       struct cvmx_ciu_en2_iox_int_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_en2_iox_int_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_iox_int_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_iox_int_w1s_cn61xx cn66xx;
-       struct cvmx_ciu_en2_iox_int_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2 {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip2_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip2_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip2_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip2_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip2_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3 {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip3_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip3_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip3_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip3_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip3_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4 {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip4_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip4_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip4_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip4_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_en2_ppx_ip4_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx cn66xx;
-       struct cvmx_ciu_en2_ppx_ip4_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_fuse {
-       uint64_t u64;
-       struct cvmx_ciu_fuse_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t fuse:32;
-#else
-               uint64_t fuse:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_fuse_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t fuse:1;
-#else
-               uint64_t fuse:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_fuse_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t fuse:2;
-#else
-               uint64_t fuse:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_fuse_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t fuse:16;
-#else
-               uint64_t fuse:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_fuse_cn38xx cn38xxp2;
-       struct cvmx_ciu_fuse_cn31xx cn50xx;
-       struct cvmx_ciu_fuse_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t fuse:4;
-#else
-               uint64_t fuse:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_fuse_cn52xx cn52xxp1;
-       struct cvmx_ciu_fuse_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t fuse:12;
-#else
-               uint64_t fuse:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_fuse_cn56xx cn56xxp1;
-       struct cvmx_ciu_fuse_cn38xx cn58xx;
-       struct cvmx_ciu_fuse_cn38xx cn58xxp1;
-       struct cvmx_ciu_fuse_cn52xx cn61xx;
-       struct cvmx_ciu_fuse_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t fuse:6;
-#else
-               uint64_t fuse:6;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_fuse_cn63xx cn63xxp1;
-       struct cvmx_ciu_fuse_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t fuse:10;
-#else
-               uint64_t fuse:10;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_fuse_s cn68xx;
-       struct cvmx_ciu_fuse_s cn68xxp1;
-       struct cvmx_ciu_fuse_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_gstop {
-       uint64_t u64;
-       struct cvmx_ciu_gstop_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t gstop:1;
-#else
-               uint64_t gstop:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } s;
-       struct cvmx_ciu_gstop_s cn30xx;
-       struct cvmx_ciu_gstop_s cn31xx;
-       struct cvmx_ciu_gstop_s cn38xx;
-       struct cvmx_ciu_gstop_s cn38xxp2;
-       struct cvmx_ciu_gstop_s cn50xx;
-       struct cvmx_ciu_gstop_s cn52xx;
-       struct cvmx_ciu_gstop_s cn52xxp1;
-       struct cvmx_ciu_gstop_s cn56xx;
-       struct cvmx_ciu_gstop_s cn56xxp1;
-       struct cvmx_ciu_gstop_s cn58xx;
-       struct cvmx_ciu_gstop_s cn58xxp1;
-       struct cvmx_ciu_gstop_s cn61xx;
-       struct cvmx_ciu_gstop_s cn63xx;
-       struct cvmx_ciu_gstop_s cn63xxp1;
-       struct cvmx_ciu_gstop_s cn66xx;
-       struct cvmx_ciu_gstop_s cn68xx;
-       struct cvmx_ciu_gstop_s cn68xxp1;
-       struct cvmx_ciu_gstop_s cnf71xx;
-};
-
-union cvmx_ciu_intx_en0 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_47_47:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t reserved_47_47:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_intx_en0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_intx_en0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
-       struct cvmx_ciu_intx_en0_cn30xx cn50xx;
-       struct cvmx_ciu_intx_en0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
-       struct cvmx_ciu_intx_en0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_en0_cn38xx cn58xx;
-       struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
-       struct cvmx_ciu_intx_en0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en0_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t wdog:1;
-#else
-               uint64_t wdog:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_intx_en1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t wdog:2;
-#else
-               uint64_t wdog:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_intx_en1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
-       struct cvmx_ciu_intx_en1_cn31xx cn50xx;
-       struct cvmx_ciu_intx_en1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t reserved_19_63:45;
-#endif
-       } cn52xxp1;
-       struct cvmx_ciu_intx_en1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_en1_cn38xx cn58xx;
-       struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
-       struct cvmx_ciu_intx_en1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
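Context for the deleted block above: each CIU register is modelled as a union that overlays the raw 64-bit CSR value (u64) with one bitfield struct per chip model (cn52xx, cn56xx, ..., cnf71xx), and every struct is spelled out twice under #ifdef __BIG_ENDIAN_BITFIELD because C leaves bitfield ordering to the ABI. The _W1C suffix means write-1-to-clear: writing a 1 to a field disables that interrupt line, writing 0 leaves it untouched. A minimal usage sketch, assuming the cvmx_write_csr() helper and a CVMX_CIU_INTX_EN1_W1C() address macro from the same Octeon headers:

	/* Sketch: disable the NAND interrupt on enable-set 1 of CIU line 0.
	 * W1C semantics mean no read-modify-write is needed: only the bit
	 * written as 1 changes, so concurrent users cannot clobber each other. */
	union cvmx_ciu_intx_en1_w1c clr;

	clr.u64 = 0;		/* touch nothing by default */
	clr.s.nand = 1;		/* request clearing of the NAND enable bit */
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(0), clr.u64);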
-union cvmx_ciu_intx_en1_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
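The _W1S union that ends here is the write-1-to-set twin of the _W1C register above: identical field layout, but writing a 1 enables the line. Pairing W1S/W1C registers lets independent contexts flip individual enable bits without the read-modify-write race a plain enable-register write would have. Sketch under the same assumed helpers as above:

	/* Sketch: enable only the NAND line; all other enables are untouched. */
	union cvmx_ciu_intx_en1_w1s set;

	set.u64 = 0;
	set.s.nand = 1;
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(0), set.u64);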
-union cvmx_ciu_intx_en4_0 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_47_47:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t reserved_47_47:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn50xx;
-       struct cvmx_ciu_intx_en4_0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
-       struct cvmx_ciu_intx_en4_0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_en4_0_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
-       struct cvmx_ciu_intx_en4_0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
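One detail worth noting in cvmx_ciu_intx_en4_0 above: models that share a bit map are aliased rather than repeated (cn63xx and cn63xxp1 are declared as struct cvmx_ciu_intx_en4_0_cn52xx), so only genuinely different layouts get their own struct. Every variant must still fit the 64-bit word it overlays; an illustrative C11 _Static_assert (not in the original header, which would use BUILD_BUG_ON) trips if a miscounted reserved_* field pushes any struct past 64 bits and widens the union:

	_Static_assert(sizeof(union cvmx_ciu_intx_en4_0) == sizeof(uint64_t),
		       "CIU_INTX_EN4_0 overlay grew past the raw 64-bit CSR");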
-union cvmx_ciu_intx_en4_0_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t reserved_44_44:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t reserved_44_44:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_1_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t wdog:2;
-#else
-               uint64_t wdog:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn50xx;
-       struct cvmx_ciu_intx_en4_1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t reserved_19_63:45;
-#endif
-       } cn52xxp1;
-       struct cvmx_ciu_intx_en4_1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_en4_1_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
-       struct cvmx_ciu_intx_en4_1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1c {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1s {
-       uint64_t u64;
-       struct cvmx_ciu_intx_en4_1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
-       struct cvmx_ciu_intx_en4_1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_en4_1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum0 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_sum0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_47_47:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t reserved_47_47:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_intx_sum0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_intx_sum0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
-       struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
-       struct cvmx_ciu_intx_sum0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
-       struct cvmx_ciu_intx_sum0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
-       struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
-       struct cvmx_ciu_intx_sum0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
-       struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum4 {
-       uint64_t u64;
-       struct cvmx_ciu_intx_sum4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_intx_sum4_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_59_63:5;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_47_47:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t reserved_47_47:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t reserved_59_63:5;
-#endif
-       } cn50xx;
-       struct cvmx_ciu_intx_sum4_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
-       struct cvmx_ciu_intx_sum4_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
-       struct cvmx_ciu_intx_sum4_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_56_63:8;
-               uint64_t timer:4;
-               uint64_t key_zero:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t key_zero:1;
-               uint64_t timer:4;
-               uint64_t reserved_56_63:8;
-#endif
-       } cn58xx;
-       struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
-       struct cvmx_ciu_intx_sum4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
-       struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
-       struct cvmx_ciu_intx_sum4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_intx_sum4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_int33_sum0 {
-       uint64_t u64;
-       struct cvmx_ciu_int33_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } s;
-       struct cvmx_ciu_int33_sum0_s cn61xx;
-       struct cvmx_ciu_int33_sum0_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t reserved_57_58:2;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t reserved_51_51:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_51_51:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_58:2;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_int33_sum0_cn63xx cn63xxp1;
-       struct cvmx_ciu_int33_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t mii:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t reserved_57_57:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t gmx_drp:2;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:2;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t reserved_57_57:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t mii:1;
-               uint64_t bootdma:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_int33_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t bootdma:1;
-               uint64_t reserved_62_62:1;
-               uint64_t ipdppthr:1;
-               uint64_t powiq:1;
-               uint64_t twsi2:1;
-               uint64_t mpi:1;
-               uint64_t pcm:1;
-               uint64_t usb:1;
-               uint64_t timer:4;
-               uint64_t sum2:1;
-               uint64_t ipd_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t gmx_drp:1;
-               uint64_t trace:1;
-               uint64_t rml:1;
-               uint64_t twsi:1;
-               uint64_t wdog_sum:1;
-               uint64_t pci_msi:4;
-               uint64_t pci_int:4;
-               uint64_t uart:2;
-               uint64_t mbox:2;
-               uint64_t gpio:16;
-               uint64_t workq:16;
-#else
-               uint64_t workq:16;
-               uint64_t gpio:16;
-               uint64_t mbox:2;
-               uint64_t uart:2;
-               uint64_t pci_int:4;
-               uint64_t pci_msi:4;
-               uint64_t wdog_sum:1;
-               uint64_t twsi:1;
-               uint64_t rml:1;
-               uint64_t trace:1;
-               uint64_t gmx_drp:1;
-               uint64_t reserved_49_49:1;
-               uint64_t ipd_drp:1;
-               uint64_t sum2:1;
-               uint64_t timer:4;
-               uint64_t usb:1;
-               uint64_t pcm:1;
-               uint64_t mpi:1;
-               uint64_t twsi2:1;
-               uint64_t powiq:1;
-               uint64_t ipdppthr:1;
-               uint64_t reserved_62_62:1;
-               uint64_t bootdma:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_int_dbg_sel {
-       uint64_t u64;
-       struct cvmx_ciu_int_dbg_sel_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t sel:3;
-               uint64_t reserved_10_15:6;
-               uint64_t irq:2;
-               uint64_t reserved_5_7:3;
-               uint64_t pp:5;
-#else
-               uint64_t pp:5;
-               uint64_t reserved_5_7:3;
-               uint64_t irq:2;
-               uint64_t reserved_10_15:6;
-               uint64_t sel:3;
-               uint64_t reserved_19_63:45;
-#endif
-       } s;
-       struct cvmx_ciu_int_dbg_sel_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t sel:3;
-               uint64_t reserved_10_15:6;
-               uint64_t irq:2;
-               uint64_t reserved_4_7:4;
-               uint64_t pp:4;
-#else
-               uint64_t pp:4;
-               uint64_t reserved_4_7:4;
-               uint64_t irq:2;
-               uint64_t reserved_10_15:6;
-               uint64_t sel:3;
-               uint64_t reserved_19_63:45;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_int_dbg_sel_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t sel:3;
-               uint64_t reserved_10_15:6;
-               uint64_t irq:2;
-               uint64_t reserved_3_7:5;
-               uint64_t pp:3;
-#else
-               uint64_t pp:3;
-               uint64_t reserved_3_7:5;
-               uint64_t irq:2;
-               uint64_t reserved_10_15:6;
-               uint64_t sel:3;
-               uint64_t reserved_19_63:45;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_int_dbg_sel_cn61xx cn66xx;
-       struct cvmx_ciu_int_dbg_sel_s cn68xx;
-       struct cvmx_ciu_int_dbg_sel_s cn68xxp1;
-       struct cvmx_ciu_int_dbg_sel_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_int_sum1 {
-       uint64_t u64;
-       struct cvmx_ciu_int_sum1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_int_sum1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t wdog:1;
-#else
-               uint64_t wdog:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_int_sum1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t wdog:2;
-#else
-               uint64_t wdog:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_int_sum1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t wdog:16;
-#else
-               uint64_t wdog:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
-       struct cvmx_ciu_int_sum1_cn31xx cn50xx;
-       struct cvmx_ciu_int_sum1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_20_63:44;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t reserved_20_63:44;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_int_sum1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_19_63:45;
-               uint64_t mii1:1;
-               uint64_t usb1:1;
-               uint64_t uart2:1;
-               uint64_t reserved_4_15:12;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_15:12;
-               uint64_t uart2:1;
-               uint64_t usb1:1;
-               uint64_t mii1:1;
-               uint64_t reserved_19_63:45;
-#endif
-       } cn52xxp1;
-       struct cvmx_ciu_int_sum1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t wdog:12;
-#else
-               uint64_t wdog:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
-       struct cvmx_ciu_int_sum1_cn38xx cn58xx;
-       struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
-       struct cvmx_ciu_int_sum1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_int_sum1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_57_62:6;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t srio1:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_6_17:12;
-               uint64_t wdog:6;
-#else
-               uint64_t wdog:6;
-               uint64_t reserved_6_17:12;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_45:9;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t srio1:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_62:6;
-               uint64_t rst:1;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
-       struct cvmx_ciu_int_sum1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_int_sum1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_37_46:10;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_46:10;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_mbox_clrx {
-       uint64_t u64;
-       struct cvmx_ciu_mbox_clrx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t bits:32;
-#else
-               uint64_t bits:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_mbox_clrx_s cn30xx;
-       struct cvmx_ciu_mbox_clrx_s cn31xx;
-       struct cvmx_ciu_mbox_clrx_s cn38xx;
-       struct cvmx_ciu_mbox_clrx_s cn38xxp2;
-       struct cvmx_ciu_mbox_clrx_s cn50xx;
-       struct cvmx_ciu_mbox_clrx_s cn52xx;
-       struct cvmx_ciu_mbox_clrx_s cn52xxp1;
-       struct cvmx_ciu_mbox_clrx_s cn56xx;
-       struct cvmx_ciu_mbox_clrx_s cn56xxp1;
-       struct cvmx_ciu_mbox_clrx_s cn58xx;
-       struct cvmx_ciu_mbox_clrx_s cn58xxp1;
-       struct cvmx_ciu_mbox_clrx_s cn61xx;
-       struct cvmx_ciu_mbox_clrx_s cn63xx;
-       struct cvmx_ciu_mbox_clrx_s cn63xxp1;
-       struct cvmx_ciu_mbox_clrx_s cn66xx;
-       struct cvmx_ciu_mbox_clrx_s cn68xx;
-       struct cvmx_ciu_mbox_clrx_s cn68xxp1;
-       struct cvmx_ciu_mbox_clrx_s cnf71xx;
-};
-
-union cvmx_ciu_mbox_setx {
-       uint64_t u64;
-       struct cvmx_ciu_mbox_setx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t bits:32;
-#else
-               uint64_t bits:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_mbox_setx_s cn30xx;
-       struct cvmx_ciu_mbox_setx_s cn31xx;
-       struct cvmx_ciu_mbox_setx_s cn38xx;
-       struct cvmx_ciu_mbox_setx_s cn38xxp2;
-       struct cvmx_ciu_mbox_setx_s cn50xx;
-       struct cvmx_ciu_mbox_setx_s cn52xx;
-       struct cvmx_ciu_mbox_setx_s cn52xxp1;
-       struct cvmx_ciu_mbox_setx_s cn56xx;
-       struct cvmx_ciu_mbox_setx_s cn56xxp1;
-       struct cvmx_ciu_mbox_setx_s cn58xx;
-       struct cvmx_ciu_mbox_setx_s cn58xxp1;
-       struct cvmx_ciu_mbox_setx_s cn61xx;
-       struct cvmx_ciu_mbox_setx_s cn63xx;
-       struct cvmx_ciu_mbox_setx_s cn63xxp1;
-       struct cvmx_ciu_mbox_setx_s cn66xx;
-       struct cvmx_ciu_mbox_setx_s cn68xx;
-       struct cvmx_ciu_mbox_setx_s cn68xxp1;
-       struct cvmx_ciu_mbox_setx_s cnf71xx;
-};
-
-union cvmx_ciu_nmi {
-       uint64_t u64;
-       struct cvmx_ciu_nmi_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t nmi:32;
-#else
-               uint64_t nmi:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_nmi_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t nmi:1;
-#else
-               uint64_t nmi:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_nmi_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t nmi:2;
-#else
-               uint64_t nmi:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_nmi_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t nmi:16;
-#else
-               uint64_t nmi:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_nmi_cn38xx cn38xxp2;
-       struct cvmx_ciu_nmi_cn31xx cn50xx;
-       struct cvmx_ciu_nmi_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t nmi:4;
-#else
-               uint64_t nmi:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_nmi_cn52xx cn52xxp1;
-       struct cvmx_ciu_nmi_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t nmi:12;
-#else
-               uint64_t nmi:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_nmi_cn56xx cn56xxp1;
-       struct cvmx_ciu_nmi_cn38xx cn58xx;
-       struct cvmx_ciu_nmi_cn38xx cn58xxp1;
-       struct cvmx_ciu_nmi_cn52xx cn61xx;
-       struct cvmx_ciu_nmi_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t nmi:6;
-#else
-               uint64_t nmi:6;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_nmi_cn63xx cn63xxp1;
-       struct cvmx_ciu_nmi_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t nmi:10;
-#else
-               uint64_t nmi:10;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_nmi_s cn68xx;
-       struct cvmx_ciu_nmi_s cn68xxp1;
-       struct cvmx_ciu_nmi_cn52xx cnf71xx;
-};
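
A note on the recurring shape of these definitions: every CIU register in this file is modeled as a union of a raw u64 and one bitfield struct per chip variant, so callers move the whole 64-bit CSR value in a single access and then decode individual fields through the matching view. A minimal sketch of that access pattern, assuming the cvmx_read_csr() accessor and the CVMX_CIU_NMI address macro from the surrounding Octeon headers; the function and the log message are illustrative, not part of this patch:

	/* Illustrative only: poll the per-core NMI summary register. */
	static void example_poll_nmi(void)
	{
		union cvmx_ciu_nmi nmi;

		nmi.u64 = cvmx_read_csr(CVMX_CIU_NMI);	/* one raw 64-bit CSR read */
		if (nmi.s.nmi)				/* decode via the bitfield view */
			pr_info("NMI pending, mask 0x%llx\n",
				(unsigned long long)nmi.s.nmi);
	}

The endianness conversion performed by this patch is layout-preserving, so this usage pattern is unchanged by it.
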
-
-union cvmx_ciu_pci_inta {
-       uint64_t u64;
-       struct cvmx_ciu_pci_inta_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t intr:2;
-#else
-               uint64_t intr:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } s;
-       struct cvmx_ciu_pci_inta_s cn30xx;
-       struct cvmx_ciu_pci_inta_s cn31xx;
-       struct cvmx_ciu_pci_inta_s cn38xx;
-       struct cvmx_ciu_pci_inta_s cn38xxp2;
-       struct cvmx_ciu_pci_inta_s cn50xx;
-       struct cvmx_ciu_pci_inta_s cn52xx;
-       struct cvmx_ciu_pci_inta_s cn52xxp1;
-       struct cvmx_ciu_pci_inta_s cn56xx;
-       struct cvmx_ciu_pci_inta_s cn56xxp1;
-       struct cvmx_ciu_pci_inta_s cn58xx;
-       struct cvmx_ciu_pci_inta_s cn58xxp1;
-       struct cvmx_ciu_pci_inta_s cn61xx;
-       struct cvmx_ciu_pci_inta_s cn63xx;
-       struct cvmx_ciu_pci_inta_s cn63xxp1;
-       struct cvmx_ciu_pci_inta_s cn66xx;
-       struct cvmx_ciu_pci_inta_s cn68xx;
-       struct cvmx_ciu_pci_inta_s cn68xxp1;
-       struct cvmx_ciu_pci_inta_s cnf71xx;
-};
-
-union cvmx_ciu_pp_bist_stat {
-       uint64_t u64;
-       struct cvmx_ciu_pp_bist_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t pp_bist:32;
-#else
-               uint64_t pp_bist:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_pp_bist_stat_s cn68xx;
-       struct cvmx_ciu_pp_bist_stat_s cn68xxp1;
-};
-
-union cvmx_ciu_pp_dbg {
-       uint64_t u64;
-       struct cvmx_ciu_pp_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t ppdbg:32;
-#else
-               uint64_t ppdbg:32;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_pp_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t ppdbg:1;
-#else
-               uint64_t ppdbg:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_pp_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t ppdbg:2;
-#else
-               uint64_t ppdbg:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_pp_dbg_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t ppdbg:16;
-#else
-               uint64_t ppdbg:16;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_pp_dbg_cn38xx cn38xxp2;
-       struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
-       struct cvmx_ciu_pp_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t ppdbg:4;
-#else
-               uint64_t ppdbg:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
-       struct cvmx_ciu_pp_dbg_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t ppdbg:12;
-#else
-               uint64_t ppdbg:12;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
-       struct cvmx_ciu_pp_dbg_cn38xx cn58xx;
-       struct cvmx_ciu_pp_dbg_cn38xx cn58xxp1;
-       struct cvmx_ciu_pp_dbg_cn52xx cn61xx;
-       struct cvmx_ciu_pp_dbg_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t ppdbg:6;
-#else
-               uint64_t ppdbg:6;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
-       struct cvmx_ciu_pp_dbg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t ppdbg:10;
-#else
-               uint64_t ppdbg:10;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_pp_dbg_s cn68xx;
-       struct cvmx_ciu_pp_dbg_s cn68xxp1;
-       struct cvmx_ciu_pp_dbg_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pp_pokex {
-       uint64_t u64;
-       struct cvmx_ciu_pp_pokex_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t poke:64;
-#else
-               uint64_t poke:64;
-#endif
-       } s;
-       struct cvmx_ciu_pp_pokex_s cn30xx;
-       struct cvmx_ciu_pp_pokex_s cn31xx;
-       struct cvmx_ciu_pp_pokex_s cn38xx;
-       struct cvmx_ciu_pp_pokex_s cn38xxp2;
-       struct cvmx_ciu_pp_pokex_s cn50xx;
-       struct cvmx_ciu_pp_pokex_s cn52xx;
-       struct cvmx_ciu_pp_pokex_s cn52xxp1;
-       struct cvmx_ciu_pp_pokex_s cn56xx;
-       struct cvmx_ciu_pp_pokex_s cn56xxp1;
-       struct cvmx_ciu_pp_pokex_s cn58xx;
-       struct cvmx_ciu_pp_pokex_s cn58xxp1;
-       struct cvmx_ciu_pp_pokex_s cn61xx;
-       struct cvmx_ciu_pp_pokex_s cn63xx;
-       struct cvmx_ciu_pp_pokex_s cn63xxp1;
-       struct cvmx_ciu_pp_pokex_s cn66xx;
-       struct cvmx_ciu_pp_pokex_s cn68xx;
-       struct cvmx_ciu_pp_pokex_s cn68xxp1;
-       struct cvmx_ciu_pp_pokex_s cnf71xx;
-};
-
-union cvmx_ciu_pp_rst {
-       uint64_t u64;
-       struct cvmx_ciu_pp_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t rst:31;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:31;
-               uint64_t reserved_32_63:32;
-#endif
-       } s;
-       struct cvmx_ciu_pp_rst_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn30xx;
-       struct cvmx_ciu_pp_rst_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t rst:1;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:1;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn31xx;
-       struct cvmx_ciu_pp_rst_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_16_63:48;
-               uint64_t rst:15;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:15;
-               uint64_t reserved_16_63:48;
-#endif
-       } cn38xx;
-       struct cvmx_ciu_pp_rst_cn38xx cn38xxp2;
-       struct cvmx_ciu_pp_rst_cn31xx cn50xx;
-       struct cvmx_ciu_pp_rst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t rst:3;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:3;
-               uint64_t reserved_4_63:60;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
-       struct cvmx_ciu_pp_rst_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_12_63:52;
-               uint64_t rst:11;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:11;
-               uint64_t reserved_12_63:52;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
-       struct cvmx_ciu_pp_rst_cn38xx cn58xx;
-       struct cvmx_ciu_pp_rst_cn38xx cn58xxp1;
-       struct cvmx_ciu_pp_rst_cn52xx cn61xx;
-       struct cvmx_ciu_pp_rst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_6_63:58;
-               uint64_t rst:5;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:5;
-               uint64_t reserved_6_63:58;
-#endif
-       } cn63xx;
-       struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
-       struct cvmx_ciu_pp_rst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t rst:9;
-               uint64_t rst0:1;
-#else
-               uint64_t rst0:1;
-               uint64_t rst:9;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_pp_rst_s cn68xx;
-       struct cvmx_ciu_pp_rst_s cn68xxp1;
-       struct cvmx_ciu_pp_rst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_qlm0 {
-       uint64_t u64;
-       struct cvmx_ciu_qlm0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t g2bypass:1;
-               uint64_t reserved_53_62:10;
-               uint64_t g2deemph:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2margin:5;
-               uint64_t reserved_32_39:8;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_39:8;
-               uint64_t g2margin:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2deemph:5;
-               uint64_t reserved_53_62:10;
-               uint64_t g2bypass:1;
-#endif
-       } s;
-       struct cvmx_ciu_qlm0_s cn61xx;
-       struct cvmx_ciu_qlm0_s cn63xx;
-       struct cvmx_ciu_qlm0_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t txbypass:1;
-               uint64_t reserved_20_30:11;
-               uint64_t txdeemph:4;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:4;
-               uint64_t reserved_20_30:11;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_63:32;
-#endif
-       } cn63xxp1;
-       struct cvmx_ciu_qlm0_s cn66xx;
-       struct cvmx_ciu_qlm0_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_63:32;
-#endif
-       } cn68xx;
-       struct cvmx_ciu_qlm0_cn68xx cn68xxp1;
-       struct cvmx_ciu_qlm0_s cnf71xx;
-};
-
-union cvmx_ciu_qlm1 {
-       uint64_t u64;
-       struct cvmx_ciu_qlm1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t g2bypass:1;
-               uint64_t reserved_53_62:10;
-               uint64_t g2deemph:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2margin:5;
-               uint64_t reserved_32_39:8;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_39:8;
-               uint64_t g2margin:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2deemph:5;
-               uint64_t reserved_53_62:10;
-               uint64_t g2bypass:1;
-#endif
-       } s;
-       struct cvmx_ciu_qlm1_s cn61xx;
-       struct cvmx_ciu_qlm1_s cn63xx;
-       struct cvmx_ciu_qlm1_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t txbypass:1;
-               uint64_t reserved_20_30:11;
-               uint64_t txdeemph:4;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:4;
-               uint64_t reserved_20_30:11;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_63:32;
-#endif
-       } cn63xxp1;
-       struct cvmx_ciu_qlm1_s cn66xx;
-       struct cvmx_ciu_qlm1_s cn68xx;
-       struct cvmx_ciu_qlm1_s cn68xxp1;
-       struct cvmx_ciu_qlm1_s cnf71xx;
-};
-
-union cvmx_ciu_qlm2 {
-       uint64_t u64;
-       struct cvmx_ciu_qlm2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t g2bypass:1;
-               uint64_t reserved_53_62:10;
-               uint64_t g2deemph:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2margin:5;
-               uint64_t reserved_32_39:8;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_39:8;
-               uint64_t g2margin:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2deemph:5;
-               uint64_t reserved_53_62:10;
-               uint64_t g2bypass:1;
-#endif
-       } s;
-       struct cvmx_ciu_qlm2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_63:32;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_qlm2_cn61xx cn63xx;
-       struct cvmx_ciu_qlm2_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_32_63:32;
-               uint64_t txbypass:1;
-               uint64_t reserved_20_30:11;
-               uint64_t txdeemph:4;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:4;
-               uint64_t reserved_20_30:11;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_63:32;
-#endif
-       } cn63xxp1;
-       struct cvmx_ciu_qlm2_cn61xx cn66xx;
-       struct cvmx_ciu_qlm2_s cn68xx;
-       struct cvmx_ciu_qlm2_s cn68xxp1;
-       struct cvmx_ciu_qlm2_cn61xx cnf71xx;
-};
 
-union cvmx_ciu_qlm3 {
+union cvmx_ciu_qlm {
        uint64_t u64;
-       struct cvmx_ciu_qlm3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t g2bypass:1;
-               uint64_t reserved_53_62:10;
-               uint64_t g2deemph:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2margin:5;
-               uint64_t reserved_32_39:8;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_39:8;
-               uint64_t g2margin:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2deemph:5;
-               uint64_t reserved_53_62:10;
-               uint64_t g2bypass:1;
-#endif
+       struct cvmx_ciu_qlm_s {
+               __BITFIELD_FIELD(uint64_t g2bypass:1,
+               __BITFIELD_FIELD(uint64_t reserved_53_62:10,
+               __BITFIELD_FIELD(uint64_t g2deemph:5,
+               __BITFIELD_FIELD(uint64_t reserved_45_47:3,
+               __BITFIELD_FIELD(uint64_t g2margin:5,
+               __BITFIELD_FIELD(uint64_t reserved_32_39:8,
+               __BITFIELD_FIELD(uint64_t txbypass:1,
+               __BITFIELD_FIELD(uint64_t reserved_21_30:10,
+               __BITFIELD_FIELD(uint64_t txdeemph:5,
+               __BITFIELD_FIELD(uint64_t reserved_13_15:3,
+               __BITFIELD_FIELD(uint64_t txmargin:5,
+               __BITFIELD_FIELD(uint64_t reserved_4_7:4,
+               __BITFIELD_FIELD(uint64_t lane_en:4,
+               ;)))))))))))))
        } s;
-       struct cvmx_ciu_qlm3_s cn68xx;
-       struct cvmx_ciu_qlm3_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm4 {
-       uint64_t u64;
-       struct cvmx_ciu_qlm4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t g2bypass:1;
-               uint64_t reserved_53_62:10;
-               uint64_t g2deemph:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2margin:5;
-               uint64_t reserved_32_39:8;
-               uint64_t txbypass:1;
-               uint64_t reserved_21_30:10;
-               uint64_t txdeemph:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txmargin:5;
-               uint64_t reserved_4_7:4;
-               uint64_t lane_en:4;
-#else
-               uint64_t lane_en:4;
-               uint64_t reserved_4_7:4;
-               uint64_t txmargin:5;
-               uint64_t reserved_13_15:3;
-               uint64_t txdeemph:5;
-               uint64_t reserved_21_30:10;
-               uint64_t txbypass:1;
-               uint64_t reserved_32_39:8;
-               uint64_t g2margin:5;
-               uint64_t reserved_45_47:3;
-               uint64_t g2deemph:5;
-               uint64_t reserved_53_62:10;
-               uint64_t g2bypass:1;
-#endif
-       } s;
-       struct cvmx_ciu_qlm4_s cn68xx;
-       struct cvmx_ciu_qlm4_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm_dcok {
-       uint64_t u64;
-       struct cvmx_ciu_qlm_dcok_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_4_63:60;
-               uint64_t qlm_dcok:4;
-#else
-               uint64_t qlm_dcok:4;
-               uint64_t reserved_4_63:60;
-#endif
-       } s;
-       struct cvmx_ciu_qlm_dcok_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_2_63:62;
-               uint64_t qlm_dcok:2;
-#else
-               uint64_t qlm_dcok:2;
-               uint64_t reserved_2_63:62;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
-       struct cvmx_ciu_qlm_dcok_s cn56xx;
-       struct cvmx_ciu_qlm_dcok_s cn56xxp1;
 };
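
The hunk above replaces each hand-written pair of `#ifdef __BIG_ENDIAN_BITFIELD` / `#else` field lists with a single `__BITFIELD_FIELD` list. For reference only (this is a sketch, not part of the patch), the MIPS helper lives in arch/mips/include/uapi/asm/bitfield.h and is defined roughly like this:

/* Sketch of the helper this patch builds on; see
 * arch/mips/include/uapi/asm/bitfield.h for the authoritative version.
 */
#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#else
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif

Each invocation nests the rest of the struct as its `more` argument, so a big-endian build emits the fields in the written (MSB-first) order while a little-endian build emits them reversed; the trailing `;` plus the run of closing parentheses simply terminates the nesting. That is exactly the duplication the deleted `#ifdef`/`#else` blocks spelled out by hand.
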
 
 union cvmx_ciu_qlm_jtgc {
        uint64_t u64;
        struct cvmx_ciu_qlm_jtgc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_17_63:47;
-               uint64_t bypass_ext:1;
-               uint64_t reserved_11_15:5;
-               uint64_t clk_div:3;
-               uint64_t reserved_7_7:1;
-               uint64_t mux_sel:3;
-               uint64_t bypass:4;
-#else
-               uint64_t bypass:4;
-               uint64_t mux_sel:3;
-               uint64_t reserved_7_7:1;
-               uint64_t clk_div:3;
-               uint64_t reserved_11_15:5;
-               uint64_t bypass_ext:1;
-               uint64_t reserved_17_63:47;
-#endif
+               __BITFIELD_FIELD(uint64_t reserved_17_63:47,
+               __BITFIELD_FIELD(uint64_t bypass_ext:1,
+               __BITFIELD_FIELD(uint64_t reserved_11_15:5,
+               __BITFIELD_FIELD(uint64_t clk_div:3,
+               __BITFIELD_FIELD(uint64_t reserved_7_7:1,
+               __BITFIELD_FIELD(uint64_t mux_sel:3,
+               __BITFIELD_FIELD(uint64_t bypass:4,
+               ;)))))))
        } s;
-       struct cvmx_ciu_qlm_jtgc_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_11_63:53;
-               uint64_t clk_div:3;
-               uint64_t reserved_5_7:3;
-               uint64_t mux_sel:1;
-               uint64_t reserved_2_3:2;
-               uint64_t bypass:2;
-#else
-               uint64_t bypass:2;
-               uint64_t reserved_2_3:2;
-               uint64_t mux_sel:1;
-               uint64_t reserved_5_7:3;
-               uint64_t clk_div:3;
-               uint64_t reserved_11_63:53;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
-       struct cvmx_ciu_qlm_jtgc_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_11_63:53;
-               uint64_t clk_div:3;
-               uint64_t reserved_6_7:2;
-               uint64_t mux_sel:2;
-               uint64_t bypass:4;
-#else
-               uint64_t bypass:4;
-               uint64_t mux_sel:2;
-               uint64_t reserved_6_7:2;
-               uint64_t clk_div:3;
-               uint64_t reserved_11_63:53;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_qlm_jtgc_cn56xx cn56xxp1;
-       struct cvmx_ciu_qlm_jtgc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_11_63:53;
-               uint64_t clk_div:3;
-               uint64_t reserved_6_7:2;
-               uint64_t mux_sel:2;
-               uint64_t reserved_3_3:1;
-               uint64_t bypass:3;
-#else
-               uint64_t bypass:3;
-               uint64_t reserved_3_3:1;
-               uint64_t mux_sel:2;
-               uint64_t reserved_6_7:2;
-               uint64_t clk_div:3;
-               uint64_t reserved_11_63:53;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_qlm_jtgc_cn61xx cn63xx;
-       struct cvmx_ciu_qlm_jtgc_cn61xx cn63xxp1;
-       struct cvmx_ciu_qlm_jtgc_cn61xx cn66xx;
-       struct cvmx_ciu_qlm_jtgc_s cn68xx;
-       struct cvmx_ciu_qlm_jtgc_s cn68xxp1;
-       struct cvmx_ciu_qlm_jtgc_cn61xx cnf71xx;
 };
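
One property every converted layout must preserve is that the field widths still sum to exactly 64 bits. A hypothetical compile-time guard for the CIU_QLM_JTGC layout just converted above (the `_width_check` name is invented here, and only the widths matter for the size, so a single ordering suffices):

/* Hypothetical guard, not part of this patch: the layout must still
 * pack into exactly one 64-bit word on the usual kernel ABIs.
 */
#include <stdint.h>

struct ciu_qlm_jtgc_width_check {
	uint64_t reserved_17_63:47;
	uint64_t bypass_ext:1;
	uint64_t reserved_11_15:5;
	uint64_t clk_div:3;
	uint64_t reserved_7_7:1;
	uint64_t mux_sel:3;
	uint64_t bypass:4;
};

_Static_assert(sizeof(struct ciu_qlm_jtgc_width_check) == sizeof(uint64_t),
	       "CIU_QLM_JTGC fields must sum to 64 bits");
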
 
 union cvmx_ciu_qlm_jtgd {
        uint64_t u64;
        struct cvmx_ciu_qlm_jtgd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t capture:1;
-               uint64_t shift:1;
-               uint64_t update:1;
-               uint64_t reserved_45_60:16;
-               uint64_t select:5;
-               uint64_t reserved_37_39:3;
-               uint64_t shft_cnt:5;
-               uint64_t shft_reg:32;
-#else
-               uint64_t shft_reg:32;
-               uint64_t shft_cnt:5;
-               uint64_t reserved_37_39:3;
-               uint64_t select:5;
-               uint64_t reserved_45_60:16;
-               uint64_t update:1;
-               uint64_t shift:1;
-               uint64_t capture:1;
-#endif
-       } s;
-       struct cvmx_ciu_qlm_jtgd_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t capture:1;
-               uint64_t shift:1;
-               uint64_t update:1;
-               uint64_t reserved_42_60:19;
-               uint64_t select:2;
-               uint64_t reserved_37_39:3;
-               uint64_t shft_cnt:5;
-               uint64_t shft_reg:32;
-#else
-               uint64_t shft_reg:32;
-               uint64_t shft_cnt:5;
-               uint64_t reserved_37_39:3;
-               uint64_t select:2;
-               uint64_t reserved_42_60:19;
-               uint64_t update:1;
-               uint64_t shift:1;
-               uint64_t capture:1;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
-       struct cvmx_ciu_qlm_jtgd_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t capture:1;
-               uint64_t shift:1;
-               uint64_t update:1;
-               uint64_t reserved_44_60:17;
-               uint64_t select:4;
-               uint64_t reserved_37_39:3;
-               uint64_t shft_cnt:5;
-               uint64_t shft_reg:32;
-#else
-               uint64_t shft_reg:32;
-               uint64_t shft_cnt:5;
-               uint64_t reserved_37_39:3;
-               uint64_t select:4;
-               uint64_t reserved_44_60:17;
-               uint64_t update:1;
-               uint64_t shift:1;
-               uint64_t capture:1;
-#endif
-       } cn56xx;
-       struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t capture:1;
-               uint64_t shift:1;
-               uint64_t update:1;
-               uint64_t reserved_37_60:24;
-               uint64_t shft_cnt:5;
-               uint64_t shft_reg:32;
-#else
-               uint64_t shft_reg:32;
-               uint64_t shft_cnt:5;
-               uint64_t reserved_37_60:24;
-               uint64_t update:1;
-               uint64_t shift:1;
-               uint64_t capture:1;
-#endif
-       } cn56xxp1;
-       struct cvmx_ciu_qlm_jtgd_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t capture:1;
-               uint64_t shift:1;
-               uint64_t update:1;
-               uint64_t reserved_43_60:18;
-               uint64_t select:3;
-               uint64_t reserved_37_39:3;
-               uint64_t shft_cnt:5;
-               uint64_t shft_reg:32;
-#else
-               uint64_t shft_reg:32;
-               uint64_t shft_cnt:5;
-               uint64_t reserved_37_39:3;
-               uint64_t select:3;
-               uint64_t reserved_43_60:18;
-               uint64_t update:1;
-               uint64_t shift:1;
-               uint64_t capture:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_qlm_jtgd_cn61xx cn63xx;
-       struct cvmx_ciu_qlm_jtgd_cn61xx cn63xxp1;
-       struct cvmx_ciu_qlm_jtgd_cn61xx cn66xx;
-       struct cvmx_ciu_qlm_jtgd_s cn68xx;
-       struct cvmx_ciu_qlm_jtgd_s cn68xxp1;
-       struct cvmx_ciu_qlm_jtgd_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_soft_bist {
-       uint64_t u64;
-       struct cvmx_ciu_soft_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_bist:1;
-#else
-               uint64_t soft_bist:1;
-               uint64_t reserved_1_63:63;
-#endif
+               __BITFIELD_FIELD(uint64_t capture:1,
+               __BITFIELD_FIELD(uint64_t shift:1,
+               __BITFIELD_FIELD(uint64_t update:1,
+               __BITFIELD_FIELD(uint64_t reserved_45_60:16,
+               __BITFIELD_FIELD(uint64_t select:5,
+               __BITFIELD_FIELD(uint64_t reserved_37_39:3,
+               __BITFIELD_FIELD(uint64_t shft_cnt:5,
+               __BITFIELD_FIELD(uint64_t shft_reg:32,
+               ;))))))))
        } s;
-       struct cvmx_ciu_soft_bist_s cn30xx;
-       struct cvmx_ciu_soft_bist_s cn31xx;
-       struct cvmx_ciu_soft_bist_s cn38xx;
-       struct cvmx_ciu_soft_bist_s cn38xxp2;
-       struct cvmx_ciu_soft_bist_s cn50xx;
-       struct cvmx_ciu_soft_bist_s cn52xx;
-       struct cvmx_ciu_soft_bist_s cn52xxp1;
-       struct cvmx_ciu_soft_bist_s cn56xx;
-       struct cvmx_ciu_soft_bist_s cn56xxp1;
-       struct cvmx_ciu_soft_bist_s cn58xx;
-       struct cvmx_ciu_soft_bist_s cn58xxp1;
-       struct cvmx_ciu_soft_bist_s cn61xx;
-       struct cvmx_ciu_soft_bist_s cn63xx;
-       struct cvmx_ciu_soft_bist_s cn63xxp1;
-       struct cvmx_ciu_soft_bist_s cn66xx;
-       struct cvmx_ciu_soft_bist_s cn68xx;
-       struct cvmx_ciu_soft_bist_s cn68xxp1;
-       struct cvmx_ciu_soft_bist_s cnf71xx;
 };
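
To see that the macro form and the open-coded form describe the same register, the two can be compared bit-for-bit in userspace. A minimal sketch, assuming a GCC-style compiler, using the CIU_SOFT_PRST layout from the hunk just below; the `BITFIELD_FIELD` stand-in is keyed off GCC's `__BYTE_ORDER__` rather than `__MIPSEB__`/`__MIPSEL__` so it builds on any host (bitfield allocation order is implementation-defined in general, but GCC allocates it to follow byte order on the ABIs of interest):

/* Hypothetical equivalence check, not part of this patch.
 * Build: gcc -o prst_check prst_check.c && ./prst_check
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's __BITFIELD_FIELD. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define BITFIELD_FIELD(field, more) field; more
#else
#define BITFIELD_FIELD(field, more) more field;
#endif

/* Open-coded layout, as in the code being deleted. */
union soft_prst_old {
	uint64_t u64;
	struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint64_t reserved_3_63:61;
		uint64_t host64:1;
		uint64_t npi:1;
		uint64_t soft_prst:1;
#else
		uint64_t soft_prst:1;
		uint64_t npi:1;
		uint64_t host64:1;
		uint64_t reserved_3_63:61;
#endif
	} s;
};

/* Macro-based layout, as in the code being added. */
union soft_prst_new {
	uint64_t u64;
	struct {
		BITFIELD_FIELD(uint64_t reserved_3_63:61,
		BITFIELD_FIELD(uint64_t host64:1,
		BITFIELD_FIELD(uint64_t npi:1,
		BITFIELD_FIELD(uint64_t soft_prst:1,
		;))))	/* stray ';' from the innermost expansion; GCC accepts it */
	} s;
};

int main(void)
{
	union soft_prst_old o = { .u64 = 0 };
	union soft_prst_new n = { .u64 = 0 };

	o.s.npi = 1;
	n.s.npi = 1;
	assert(o.u64 == n.u64);	/* same bit position either way */
	printf("npi -> 0x%016" PRIx64 "\n", n.u64);
	return 0;
}
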
 
 union cvmx_ciu_soft_prst {
        uint64_t u64;
        struct cvmx_ciu_soft_prst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_3_63:61;
-               uint64_t host64:1;
-               uint64_t npi:1;
-               uint64_t soft_prst:1;
-#else
-               uint64_t soft_prst:1;
-               uint64_t npi:1;
-               uint64_t host64:1;
-               uint64_t reserved_3_63:61;
-#endif
-       } s;
-       struct cvmx_ciu_soft_prst_s cn30xx;
-       struct cvmx_ciu_soft_prst_s cn31xx;
-       struct cvmx_ciu_soft_prst_s cn38xx;
-       struct cvmx_ciu_soft_prst_s cn38xxp2;
-       struct cvmx_ciu_soft_prst_s cn50xx;
-       struct cvmx_ciu_soft_prst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_prst:1;
-#else
-               uint64_t soft_prst:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } cn52xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
-       struct cvmx_ciu_soft_prst_cn52xx cn56xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
-       struct cvmx_ciu_soft_prst_s cn58xx;
-       struct cvmx_ciu_soft_prst_s cn58xxp1;
-       struct cvmx_ciu_soft_prst_cn52xx cn61xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn63xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
-       struct cvmx_ciu_soft_prst_cn52xx cn66xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn68xx;
-       struct cvmx_ciu_soft_prst_cn52xx cn68xxp1;
-       struct cvmx_ciu_soft_prst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_soft_prst1 {
-       uint64_t u64;
-       struct cvmx_ciu_soft_prst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_prst:1;
-#else
-               uint64_t soft_prst:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } s;
-       struct cvmx_ciu_soft_prst1_s cn52xx;
-       struct cvmx_ciu_soft_prst1_s cn52xxp1;
-       struct cvmx_ciu_soft_prst1_s cn56xx;
-       struct cvmx_ciu_soft_prst1_s cn56xxp1;
-       struct cvmx_ciu_soft_prst1_s cn61xx;
-       struct cvmx_ciu_soft_prst1_s cn63xx;
-       struct cvmx_ciu_soft_prst1_s cn63xxp1;
-       struct cvmx_ciu_soft_prst1_s cn66xx;
-       struct cvmx_ciu_soft_prst1_s cn68xx;
-       struct cvmx_ciu_soft_prst1_s cn68xxp1;
-       struct cvmx_ciu_soft_prst1_s cnf71xx;
-};
-
-union cvmx_ciu_soft_prst2 {
-       uint64_t u64;
-       struct cvmx_ciu_soft_prst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_prst:1;
-#else
-               uint64_t soft_prst:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } s;
-       struct cvmx_ciu_soft_prst2_s cn66xx;
-};
-
-union cvmx_ciu_soft_prst3 {
-       uint64_t u64;
-       struct cvmx_ciu_soft_prst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_prst:1;
-#else
-               uint64_t soft_prst:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } s;
-       struct cvmx_ciu_soft_prst3_s cn66xx;
-};
-
-union cvmx_ciu_soft_rst {
-       uint64_t u64;
-       struct cvmx_ciu_soft_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t soft_rst:1;
-#else
-               uint64_t soft_rst:1;
-               uint64_t reserved_1_63:63;
-#endif
-       } s;
-       struct cvmx_ciu_soft_rst_s cn30xx;
-       struct cvmx_ciu_soft_rst_s cn31xx;
-       struct cvmx_ciu_soft_rst_s cn38xx;
-       struct cvmx_ciu_soft_rst_s cn38xxp2;
-       struct cvmx_ciu_soft_rst_s cn50xx;
-       struct cvmx_ciu_soft_rst_s cn52xx;
-       struct cvmx_ciu_soft_rst_s cn52xxp1;
-       struct cvmx_ciu_soft_rst_s cn56xx;
-       struct cvmx_ciu_soft_rst_s cn56xxp1;
-       struct cvmx_ciu_soft_rst_s cn58xx;
-       struct cvmx_ciu_soft_rst_s cn58xxp1;
-       struct cvmx_ciu_soft_rst_s cn61xx;
-       struct cvmx_ciu_soft_rst_s cn63xx;
-       struct cvmx_ciu_soft_rst_s cn63xxp1;
-       struct cvmx_ciu_soft_rst_s cn66xx;
-       struct cvmx_ciu_soft_rst_s cn68xx;
-       struct cvmx_ciu_soft_rst_s cn68xxp1;
-       struct cvmx_ciu_soft_rst_s cnf71xx;
-};
-
-union cvmx_ciu_sum1_iox_int {
-       uint64_t u64;
-       struct cvmx_ciu_sum1_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
+               __BITFIELD_FIELD(uint64_t reserved_3_63:61,
+               __BITFIELD_FIELD(uint64_t host64:1,
+               __BITFIELD_FIELD(uint64_t npi:1,
+               __BITFIELD_FIELD(uint64_t soft_prst:1,
+               ;))))
        } s;
-       struct cvmx_ciu_sum1_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum1_iox_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_sum1_iox_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip2 {
-       uint64_t u64;
-       struct cvmx_ciu_sum1_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_sum1_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum1_ppx_ip2_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_sum1_ppx_ip2_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip3 {
-       uint64_t u64;
-       struct cvmx_ciu_sum1_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_sum1_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum1_ppx_ip3_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_sum1_ppx_ip3_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip4 {
-       uint64_t u64;
-       struct cvmx_ciu_sum1_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } s;
-       struct cvmx_ciu_sum1_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_41_45:5;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_38_39:2;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_4_17:14;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_17:14;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_39:2;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_45:5;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum1_ppx_ip4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_62_62:1;
-               uint64_t srio3:1;
-               uint64_t srio2:1;
-               uint64_t reserved_57_59:3;
-               uint64_t dfm:1;
-               uint64_t reserved_53_55:3;
-               uint64_t lmc0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t srio0:1;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t agl:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agx1:1;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t dfa:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t zip:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t mii1:1;
-               uint64_t reserved_10_17:8;
-               uint64_t wdog:10;
-#else
-               uint64_t wdog:10;
-               uint64_t reserved_10_17:8;
-               uint64_t mii1:1;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t zip:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t dfa:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t agx1:1;
-               uint64_t reserved_38_45:8;
-               uint64_t agl:1;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t srio0:1;
-               uint64_t reserved_51_51:1;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_55:3;
-               uint64_t dfm:1;
-               uint64_t reserved_57_59:3;
-               uint64_t srio2:1;
-               uint64_t srio3:1;
-               uint64_t reserved_62_62:1;
-               uint64_t rst:1;
-#endif
-       } cn66xx;
-       struct cvmx_ciu_sum1_ppx_ip4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t rst:1;
-               uint64_t reserved_53_62:10;
-               uint64_t lmc0:1;
-               uint64_t reserved_50_51:2;
-               uint64_t pem1:1;
-               uint64_t pem0:1;
-               uint64_t ptp:1;
-               uint64_t reserved_41_46:6;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_37_39:3;
-               uint64_t agx0:1;
-               uint64_t dpi:1;
-               uint64_t sli:1;
-               uint64_t usb:1;
-               uint64_t reserved_32_32:1;
-               uint64_t key:1;
-               uint64_t rad:1;
-               uint64_t tim:1;
-               uint64_t reserved_28_28:1;
-               uint64_t pko:1;
-               uint64_t pip:1;
-               uint64_t ipd:1;
-               uint64_t l2c:1;
-               uint64_t pow:1;
-               uint64_t fpa:1;
-               uint64_t iob:1;
-               uint64_t mio:1;
-               uint64_t nand:1;
-               uint64_t reserved_4_18:15;
-               uint64_t wdog:4;
-#else
-               uint64_t wdog:4;
-               uint64_t reserved_4_18:15;
-               uint64_t nand:1;
-               uint64_t mio:1;
-               uint64_t iob:1;
-               uint64_t fpa:1;
-               uint64_t pow:1;
-               uint64_t l2c:1;
-               uint64_t ipd:1;
-               uint64_t pip:1;
-               uint64_t pko:1;
-               uint64_t reserved_28_28:1;
-               uint64_t tim:1;
-               uint64_t rad:1;
-               uint64_t key:1;
-               uint64_t reserved_32_32:1;
-               uint64_t usb:1;
-               uint64_t sli:1;
-               uint64_t dpi:1;
-               uint64_t agx0:1;
-               uint64_t reserved_37_39:3;
-               uint64_t dpi_dma:1;
-               uint64_t reserved_41_46:6;
-               uint64_t ptp:1;
-               uint64_t pem0:1;
-               uint64_t pem1:1;
-               uint64_t reserved_50_51:2;
-               uint64_t lmc0:1;
-               uint64_t reserved_53_62:10;
-               uint64_t rst:1;
-#endif
-       } cnf71xx;
-};
-
-union cvmx_ciu_sum2_iox_int {
-       uint64_t u64;
-       struct cvmx_ciu_sum2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_sum2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum2_iox_int_cn61xx cn66xx;
-       struct cvmx_ciu_sum2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip2 {
-       uint64_t u64;
-       struct cvmx_ciu_sum2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_sum2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum2_ppx_ip2_cn61xx cn66xx;
-       struct cvmx_ciu_sum2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip3 {
-       uint64_t u64;
-       struct cvmx_ciu_sum2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_sum2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum2_ppx_ip3_cn61xx cn66xx;
-       struct cvmx_ciu_sum2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip4 {
-       uint64_t u64;
-       struct cvmx_ciu_sum2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_15_63:49;
-               uint64_t endor:2;
-               uint64_t eoi:1;
-               uint64_t reserved_10_11:2;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_11:2;
-               uint64_t eoi:1;
-               uint64_t endor:2;
-               uint64_t reserved_15_63:49;
-#endif
-       } s;
-       struct cvmx_ciu_sum2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_10_63:54;
-               uint64_t timer:6;
-               uint64_t reserved_0_3:4;
-#else
-               uint64_t reserved_0_3:4;
-               uint64_t timer:6;
-               uint64_t reserved_10_63:54;
-#endif
-       } cn61xx;
-       struct cvmx_ciu_sum2_ppx_ip4_cn61xx cn66xx;
-       struct cvmx_ciu_sum2_ppx_ip4_s cnf71xx;
 };
 
 union cvmx_ciu_timx {
        uint64_t u64;
        struct cvmx_ciu_timx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_37_63:27;
-               uint64_t one_shot:1;
-               uint64_t len:36;
-#else
-               uint64_t len:36;
-               uint64_t one_shot:1;
-               uint64_t reserved_37_63:27;
-#endif
-       } s;
-       struct cvmx_ciu_timx_s cn30xx;
-       struct cvmx_ciu_timx_s cn31xx;
-       struct cvmx_ciu_timx_s cn38xx;
-       struct cvmx_ciu_timx_s cn38xxp2;
-       struct cvmx_ciu_timx_s cn50xx;
-       struct cvmx_ciu_timx_s cn52xx;
-       struct cvmx_ciu_timx_s cn52xxp1;
-       struct cvmx_ciu_timx_s cn56xx;
-       struct cvmx_ciu_timx_s cn56xxp1;
-       struct cvmx_ciu_timx_s cn58xx;
-       struct cvmx_ciu_timx_s cn58xxp1;
-       struct cvmx_ciu_timx_s cn61xx;
-       struct cvmx_ciu_timx_s cn63xx;
-       struct cvmx_ciu_timx_s cn63xxp1;
-       struct cvmx_ciu_timx_s cn66xx;
-       struct cvmx_ciu_timx_s cn68xx;
-       struct cvmx_ciu_timx_s cn68xxp1;
-       struct cvmx_ciu_timx_s cnf71xx;
-};
-
-union cvmx_ciu_tim_multi_cast {
-       uint64_t u64;
-       struct cvmx_ciu_tim_multi_cast_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_1_63:63;
-               uint64_t en:1;
-#else
-               uint64_t en:1;
-               uint64_t reserved_1_63:63;
-#endif
+               __BITFIELD_FIELD(uint64_t reserved_37_63:27,
+               __BITFIELD_FIELD(uint64_t one_shot:1,
+               __BITFIELD_FIELD(uint64_t len:36,
+               ;)))
        } s;
-       struct cvmx_ciu_tim_multi_cast_s cn61xx;
-       struct cvmx_ciu_tim_multi_cast_s cn66xx;
-       struct cvmx_ciu_tim_multi_cast_s cnf71xx;
 };
 
 union cvmx_ciu_wdogx {
        uint64_t u64;
        struct cvmx_ciu_wdogx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
-               uint64_t reserved_46_63:18;
-               uint64_t gstopen:1;
-               uint64_t dstop:1;
-               uint64_t cnt:24;
-               uint64_t len:16;
-               uint64_t state:2;
-               uint64_t mode:2;
-#else
-               uint64_t mode:2;
-               uint64_t state:2;
-               uint64_t len:16;
-               uint64_t cnt:24;
-               uint64_t dstop:1;
-               uint64_t gstopen:1;
-               uint64_t reserved_46_63:18;
-#endif
+               __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+               __BITFIELD_FIELD(uint64_t gstopen:1,
+               __BITFIELD_FIELD(uint64_t dstop:1,
+               __BITFIELD_FIELD(uint64_t cnt:24,
+               __BITFIELD_FIELD(uint64_t len:16,
+               __BITFIELD_FIELD(uint64_t state:2,
+               __BITFIELD_FIELD(uint64_t mode:2,
+               ;)))))))
        } s;
-       struct cvmx_ciu_wdogx_s cn30xx;
-       struct cvmx_ciu_wdogx_s cn31xx;
-       struct cvmx_ciu_wdogx_s cn38xx;
-       struct cvmx_ciu_wdogx_s cn38xxp2;
-       struct cvmx_ciu_wdogx_s cn50xx;
-       struct cvmx_ciu_wdogx_s cn52xx;
-       struct cvmx_ciu_wdogx_s cn52xxp1;
-       struct cvmx_ciu_wdogx_s cn56xx;
-       struct cvmx_ciu_wdogx_s cn56xxp1;
-       struct cvmx_ciu_wdogx_s cn58xx;
-       struct cvmx_ciu_wdogx_s cn58xxp1;
-       struct cvmx_ciu_wdogx_s cn61xx;
-       struct cvmx_ciu_wdogx_s cn63xx;
-       struct cvmx_ciu_wdogx_s cn63xxp1;
-       struct cvmx_ciu_wdogx_s cn66xx;
-       struct cvmx_ciu_wdogx_s cn68xx;
-       struct cvmx_ciu_wdogx_s cn68xxp1;
-       struct cvmx_ciu_wdogx_s cnf71xx;
 };
 
-#endif
+#endif /* __CVMX_CIU_DEFS_H__ */
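Editor's note: the conversion above is the point of this hunk - every paired #ifdef __BIG_ENDIAN_BITFIELD / #else block becomes one nested chain of __BITFIELD_FIELD() invocations, so each register layout is written exactly once. From memory, the helper lives in arch/mips/include/uapi/asm/bitfield.h and looks roughly like this (worth checking against the tree); it emits the field before or after the remaining list depending on bitfield endianness, which is why the struct bodies end in the ;))) run of closing parentheses:

	#ifdef __BIG_ENDIAN_BITFIELD
	#define __BITFIELD_FIELD(field, more)	\
		field;				\
		more
	#else	/* __LITTLE_ENDIAN_BITFIELD */
	#define __BITFIELD_FIELD(field, more)	\
		more				\
		field;
	#endif

On a little-endian build the cvmx_ciu_timx_s body above therefore expands to len, then one_shot, then reserved_37_63, matching the old #else branch.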
index e347496a33c38c69869a7dc47ef94565ca9dcb5c..80e4f8358b81f34b5ec76432f4fb1f8874d707e9 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -2070,6 +2070,8 @@ static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
        return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
 }
 
+void __cvmx_interrupt_gmxx_enable(int interface);
+
 union cvmx_gmxx_bad_reg {
        uint64_t u64;
        struct cvmx_gmxx_bad_reg_s {
index a5e8fd861c37f21609222bcaddf155acfcac1d65..39da7f9d7b3fbd40938c3e9bc74551f0af198b77 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -334,6 +334,8 @@ static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsig
        return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
 }
 
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+
 union cvmx_pcsx_anx_adv_reg {
        uint64_t u64;
        struct cvmx_pcsx_anx_adv_reg_s {
index b5b45d26f1c57d0e67b135ad6bb69e3c3cd696f2..847dd9dca6ea52ad283939821913cc88cb49d4f8 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -268,6 +268,8 @@ static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
        return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
 }
 
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
 union cvmx_pcsxx_10gbx_status_reg {
        uint64_t u64;
        struct cvmx_pcsxx_10gbx_status_reg_s {
index c7d601d9446e03379a290e92fe34389171afe186..f4c4e8051160db6107b2f9733b1b890c6e449945 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
 #define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
 #define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
 
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+
 union cvmx_spxx_bckprs_cnt {
        uint64_t u64;
        struct cvmx_spxx_bckprs_cnt_s {
index 146354005d3b7b4a8bcb6ca8c8588f02559fdb58..3c409a854d912e1aa0fe9ae2f227d1f60b34c664 100644 (file)
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
 #define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
 #define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
 
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
 union cvmx_stxx_arb_ctl {
        uint64_t u64;
        struct cvmx_stxx_arb_ctl_s {
index c99c4b6a79f44d3c4bbb4d2144d76fe008e0deaf..60481502826ade26d02b62b9cfc5e427f815ca06 100644 (file)
@@ -279,13 +279,12 @@ union octeon_cvmemctl {
        } s;
 };
 
-extern void octeon_write_lcd(const char *s);
 extern void octeon_check_cpu_bist(void);
-extern int octeon_get_boot_uart(void);
 
-struct uart_port;
-extern unsigned int octeon_serial_in(struct uart_port *, int);
-extern void octeon_serial_out(struct uart_port *, int, int);
+int octeon_prune_device_tree(void);
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;
 
 /**
  * Write a 32bit value to the Octeon NPI register space
index 1884609741a83d6bbd0af7cf5162cce051bac3ba..b12d9a3fbfb6c040aab5b5b1a55fe67f517f44ec 100644 (file)
@@ -63,4 +63,7 @@ enum octeon_dma_bar_type {
  */
 extern enum octeon_dma_bar_type octeon_dma_bar_type;
 
+void octeon_pci_dma_init(void);
+extern char *octeon_swiotlb;
+
 #endif
index ad461216b5a1f1ae70832c4f4a778c1d2e0ec123..e8cc328fce2d62cb49100d4135b7d4d91b1a4735 100644 (file)
@@ -80,7 +80,12 @@ extern void build_copy_page(void);
  * used in our early mem init code for all memory models.
  * So always define it.
  */
-#define ARCH_PFN_OFFSET                PFN_UP(PHYS_OFFSET)
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+extern unsigned long ARCH_PFN_OFFSET;
+# define ARCH_PFN_OFFSET       ARCH_PFN_OFFSET
+#else
+# define ARCH_PFN_OFFSET       PFN_UP(PHYS_OFFSET)
+#endif
 
 extern void clear_page(void * page);
 extern void copy_page(void * to, void * from);
@@ -252,8 +257,8 @@ extern int __virt_addr_valid(const volatile void *kaddr);
         ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#define UNCAC_ADDR(addr)       ((addr) - PAGE_OFFSET + UNCAC_BASE)
-#define CAC_ADDR(addr)         ((addr) - UNCAC_BASE + PAGE_OFFSET)
+#define UNCAC_ADDR(addr)       (UNCAC_BASE + __pa(addr))
+#define CAC_ADDR(addr)         ((unsigned long)__va((addr) - UNCAC_BASE))
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
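Two notes on the page.h changes. The ARCH_PFN_OFFSET hunk uses the self-referential define (#define ARCH_PFN_OFFSET ARCH_PFN_OFFSET) so that generic code's #ifdef test still fires while the token itself resolves to the new extern variable. The UNCAC_ADDR()/CAC_ADDR() rewrite routes both macros through __pa()/__va() instead of assuming that addr - PAGE_OFFSET is a physical address, which only held for PHYS_OFFSET == 0. A minimal usage sketch (illustrative, not from the patch; the Jazz DMA code later in this series does exactly this round trip):

	void *cached = kmalloc(PAGE_SIZE, GFP_KERNEL);
	/* new: UNCAC_ADDR(x) == UNCAC_BASE + __pa(x) */
	unsigned long uncached = UNCAC_ADDR((unsigned long)cached);
	/* new: CAC_ADDR(x) == (unsigned long)__va((x) - UNCAC_BASE) */
	void *cached_again = (void *)CAC_ADDR(uncached);	/* == cached */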
index af34afbc32d946f6e91e6c4cef69ac025a4c9d54..b2fa62922d88443dd307d1875eb433479ee7f993 100644 (file)
@@ -141,7 +141,7 @@ struct mips_fpu_struct {
 
 #define NUM_DSP_REGS   6
 
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
 
 struct mips_dsp_state {
        dspreg_t        dspr[NUM_DSP_REGS];
@@ -386,7 +386,20 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
+#ifdef CONFIG_CPU_LOONGSON3
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax()    smp_mb()
+#else
 #define cpu_relax()    barrier()
+#endif
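To make the failure mode concrete, here is a hedged sketch of the pattern the new comment describes; request and response are illustrative names:

	static int request, response;

	/* The store below can sit in Loongson-3's store-fill-buffer
	 * while the read loop monopolises the load unit, so a peer
	 * polling 'request' may never see it. The smp_mb() now hidden
	 * in cpu_relax() forces the store out on every spin. */
	WRITE_ONCE(request, 1);
	while (!READ_ONCE(response))
		cpu_relax();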
 
 /*
  * Return_address is a replacement for __builtin_return_address(count)
index d49d247d48a114a3f2a301a7796d848b677f0c07..bb36a400203df50d8a76a74420213bad6b61b45c 100644 (file)
@@ -2,8 +2,10 @@
 #ifndef _MIPS_SETUP_H
 #define _MIPS_SETUP_H
 
+#include <linux/types.h>
 #include <uapi/asm/setup.h>
 
+extern void prom_putchar(char);
 extern void setup_early_printk(void);
 
 #ifdef CONFIG_EARLY_PRINTK_8250
index 195db5045ae57fa972096417a41ba76b4048a4e5..0d9fad5915fe647747111d6d41da4e0a147b07ef 100644 (file)
@@ -31,7 +31,6 @@ extern int prom_flags;
 #define PROM_FLAG_DONT_FREE_TEMP       4
 
 /* Simple char-by-char console I/O. */
-extern void prom_putchar(char c);
 extern char prom_getchar(void);
 
 /* Get next memory descriptor after CURR, returns first descriptor
index 91831800c48074a4a56bcdd881fb6035d0c00ba8..59f31a95facd1911a48ab038e7c5f99f28650259 100644 (file)
@@ -39,8 +39,6 @@ __asm__(                                                              \
        ".end\t__" #symbol "\n\t"                                       \
        ".size\t__" #symbol",. - __" #symbol)
 
-#define nabi_no_regargs
-
 #endif /* CONFIG_32BIT */
 
 #ifdef CONFIG_64BIT
@@ -67,16 +65,6 @@ __asm__(                                                             \
        ".end\t__" #symbol "\n\t"                                       \
        ".size\t__" #symbol",. - __" #symbol)
 
-#define nabi_no_regargs                                                        \
-       unsigned long __dummy0,                                         \
-       unsigned long __dummy1,                                         \
-       unsigned long __dummy2,                                         \
-       unsigned long __dummy3,                                         \
-       unsigned long __dummy4,                                         \
-       unsigned long __dummy5,                                         \
-       unsigned long __dummy6,                                         \
-       unsigned long __dummy7,
-
 #endif /* CONFIG_64BIT */
 
 #endif /* _ASM_SIM_H */
index 88ebd83b3bf96491379bfbe5de9b3d68e6a42469..056a6bf13491cf9c10e446c4831052476bbbb541 100644 (file)
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
 extern cpumask_t cpu_foreign_map[];
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+       extern int vdso_smp_processor_id(void)
+               __compiletime_error("VDSO should not call smp_processor_id()");
+       return vdso_smp_processor_id();
+#else
+       return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
 
 /* Map from cpu id to sequential logical cpu number.  This will only
    not be idempotent when cpus failed to come on-line. */
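The inline replacement for raw_smp_processor_id() exists so that VDSO code cannot silently read per-thread kernel state it has no access to. The mechanism generalises; a hedged sketch (on gcc, __compiletime_error() wraps __attribute__((error(...))), so any call that survives optimisation fails the build with the given message):

	/* declared, never defined */
	extern int not_in_vdso(void)
		__compiletime_error("the VDSO must not use this");

	static inline int checked_helper(void)
	{
	#ifdef __VDSO__
		return not_in_vdso();		/* build error if reached */
	#else
		return current_thread_info()->cpu;
	#endif
	}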
index 64887d3c7ec3200d8e812d8befbd5c5537d28ce5..9a2c47bf3c4045e79d913d0528f2803e1dcbe01f 100644 (file)
@@ -49,7 +49,6 @@ void txx9_spi_init(int busid, unsigned long base, int irq);
 void txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr);
 void txx9_sio_init(unsigned long baseaddr, int irq,
                   unsigned int line, unsigned int sclk, int nocts);
-void prom_putchar(char c);
 #ifdef CONFIG_EARLY_PRINTK
 extern void (*txx9_prom_putchar)(char c);
 void txx9_sio_putchar_init(unsigned long baseaddr);
index 6d667087f2aa1dc1ac8aac7a6fc45e107d664027..00805ac6e9fc723812b3c920380c8da7f6d58c82 100644 (file)
@@ -101,13 +101,6 @@ struct tx4939_irc_reg {
        struct tx4939_le_reg maskext;
 };
 
-struct tx4939_rtc_reg {
-       __u32 ctl;
-       __u32 adr;
-       __u32 dat;
-       __u32 tbc;
-};
-
 struct tx4939_crypto_reg {
        struct tx4939_le_reg csr;
        struct tx4939_le_reg idesptr;
@@ -369,26 +362,6 @@ struct tx4939_vpc_desc {
 #define TX4939_CLKCTR_SIO0RST  0x00000002
 #define TX4939_CLKCTR_CYPRST   0x00000001
 
-/*
- * RTC
- */
-#define TX4939_RTCCTL_ALME     0x00000080
-#define TX4939_RTCCTL_ALMD     0x00000040
-#define TX4939_RTCCTL_BUSY     0x00000020
-
-#define TX4939_RTCCTL_COMMAND  0x00000007
-#define TX4939_RTCCTL_COMMAND_NOP      0x00000000
-#define TX4939_RTCCTL_COMMAND_GETTIME  0x00000001
-#define TX4939_RTCCTL_COMMAND_SETTIME  0x00000002
-#define TX4939_RTCCTL_COMMAND_GETALARM 0x00000003
-#define TX4939_RTCCTL_COMMAND_SETALARM 0x00000004
-
-#define TX4939_RTCTBC_PM       0x00000080
-#define TX4939_RTCTBC_COMP     0x0000007f
-
-#define TX4939_RTC_REG_RAMSIZE 0x00000100
-#define TX4939_RTC_REG_RWBSIZE 0x00000006
-
 /*
  * CRYPTO
  */
@@ -498,8 +471,6 @@ struct tx4939_vpc_desc {
 #define tx4939_ccfgptr \
                ((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
 #define tx4939_sramcptr                tx4938_sramcptr
-#define tx4939_rtcptr \
-               ((struct tx4939_rtc_reg __iomem *)TX4939_RTC_REG)
 #define tx4939_cryptoptr \
                ((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
 #define tx4939_vpcptr  ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
index bb05e9916a5fa7f969d915b742329ae67cd51b57..f25dd1d83fb74700b33e4bf2387ebf89ac200f64 100644 (file)
 #define __NR_pkey_alloc                        (__NR_Linux + 364)
 #define __NR_pkey_free                 (__NR_Linux + 365)
 #define __NR_statx                     (__NR_Linux + 366)
+#define __NR_rseq                      (__NR_Linux + 367)
+#define __NR_io_pgetevents             (__NR_Linux + 368)
 
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            366
+#define __NR_Linux_syscalls            368
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                366
+#define __NR_O32_Linux_syscalls                368
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_pkey_alloc                        (__NR_Linux + 324)
 #define __NR_pkey_free                 (__NR_Linux + 325)
 #define __NR_statx                     (__NR_Linux + 326)
+#define __NR_rseq                      (__NR_Linux + 327)
+#define __NR_io_pgetevents             (__NR_Linux + 328)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            326
+#define __NR_Linux_syscalls            328
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         326
+#define __NR_64_Linux_syscalls         328
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_pkey_alloc                        (__NR_Linux + 328)
 #define __NR_pkey_free                 (__NR_Linux + 329)
 #define __NR_statx                     (__NR_Linux + 330)
+#define __NR_rseq                      (__NR_Linux + 331)
+#define __NR_io_pgetevents             (__NR_Linux + 332)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            330
+#define __NR_Linux_syscalls            332
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                330
+#define __NR_N32_Linux_syscalls                332
 
 #endif /* _UAPI_ASM_UNISTD_H */
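The same two syscalls land at o32 offsets 367/368, n64 327/328 and n32 331/332. Until libc grows wrappers, userland can probe them through syscall(2); a hedged o32 sketch (the literal 4000 + 367 mirrors the defines above):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		/* rseq(NULL, 0, 0, 0): expect -1/ENOSYS on a kernel
		 * without this patch, -1/EINVAL (bad rseq_len) with it. */
		long ret = syscall(4000 + 367, NULL, 0, 0, 0);

		printf("rseq: ret=%ld errno=%d\n", ret, errno);
		return 0;
	}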
index d626a9a391cc9dc594a3924ddf249c0d70ce88e2..d31bc2f0120882afa493f2dd19efa4fa79ae946f 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/bootmem.h>
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <asm/mipsregs.h>
 #include <asm/jazz.h>
 #include <asm/io.h>
@@ -86,6 +88,7 @@ static int __init vdma_init(void)
        printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
        return 0;
 }
+arch_initcall(vdma_init);
 
 /*
  * Allocate DMA pagetables using a simple first-fit algorithm
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel)
        return enable;
 }
 
-arch_initcall(vdma_init);
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+       void *ret;
+
+       ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       if (!ret)
+               return NULL;
+
+       *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+       if (*dma_handle == VDMA_ERROR) {
+               dma_direct_free(dev, size, ret, *dma_handle, attrs);
+               return NULL;
+       }
+
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
+               dma_cache_wback_inv((unsigned long)ret, size);
+               ret = (void *)UNCAC_ADDR(ret);
+       }
+       return ret;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, unsigned long attrs)
+{
+       vdma_free(dma_handle);
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+               vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+       return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+       phys_addr_t phys = page_to_phys(page) + offset;
+
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               arch_sync_dma_for_device(dev, phys, size, dir);
+       return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+       vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nents, i) {
+               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                       arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+                               dir);
+               sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+               if (sg->dma_address == VDMA_ERROR)
+                       return 0;
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nents, i) {
+               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                       arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
+                               dir);
+               vdma_free(sg->dma_address);
+       }
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr == VDMA_ERROR;
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+       .alloc                  = jazz_dma_alloc,
+       .free                   = jazz_dma_free,
+       .mmap                   = arch_dma_mmap,
+       .map_page               = jazz_dma_map_page,
+       .unmap_page             = jazz_dma_unmap_page,
+       .map_sg                 = jazz_dma_map_sg,
+       .unmap_sg               = jazz_dma_unmap_sg,
+       .sync_single_for_cpu    = jazz_dma_sync_single_for_cpu,
+       .sync_single_for_device = jazz_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = jazz_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = jazz_dma_sync_sg_for_device,
+       .dma_supported          = dma_direct_supported,
+       .cache_sync             = arch_dma_cache_sync,
+       .mapping_error          = jazz_dma_mapping_error,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
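With jazz_dma_ops wired up, drivers never touch the VDMA pagetables directly; they use the generic DMA API and receive translated bus addresses from vdma_alloc(). A hedged consumer-side sketch:

	/* 'dev' is a Jazz platform device whose dma_ops resolve to
	 * jazz_dma_ops; 'handle' is a VDMA address, not __pa(buf). */
	dma_addr_t handle;
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);

	if (buf) {
		/* ... program the device with 'handle' ... */
		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	}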
index 448fd41792e4bb24dbe9299450f27ef573320ff7..1b5e121c3f0d8de5f0fe7f7a0f16913c293e25f4 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/screen_info.h>
 #include <linux/platform_device.h>
 #include <linux/serial_8250.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/jazz.h>
 #include <asm/jazzdma.h>
@@ -136,10 +137,16 @@ static struct resource jazz_esp_rsrc[] = {
        }
 };
 
+static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device jazz_esp_pdev = {
        .name           = "jazz_esp",
        .num_resources  = ARRAY_SIZE(jazz_esp_rsrc),
-       .resource       = jazz_esp_rsrc
+       .resource       = jazz_esp_rsrc,
+       .dev = {
+               .dma_mask          = &jazz_esp_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       }
 };
 
 static struct resource jazz_sonic_rsrc[] = {
@@ -155,10 +162,16 @@ static struct resource jazz_sonic_rsrc[] = {
        }
 };
 
+static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device jazz_sonic_pdev = {
        .name           = "jazzsonic",
        .num_resources  = ARRAY_SIZE(jazz_sonic_rsrc),
-       .resource       = jazz_sonic_rsrc
+       .resource       = jazz_sonic_rsrc,
+       .dev = {
+               .dma_mask          = &jazz_sonic_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       }
 };
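Platform devices are created with a NULL dma_mask, and dma-direct refuses to map for a device without one, hence the static masks added in this board code. Had the drivers owned the decision, the usual probe-time idiom would be (a sketch, assuming the driver may claim 32 bits):

	static int probe(struct platform_device *pdev)
	{
		int err;

		/* points dev->dma_mask at coherent_dma_mask and sets
		 * both to the same value */
		err = dma_coerce_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (err)
			return err;
		/* ... */
		return 0;
	}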
 
 static struct resource jazz_cmos_rsrc[] = {
index 28448d358c10d42c6ed5600ed3a5e5d67919e7e5..a2a5a85ea1f9360d3ff90e7e0419c423b6a0fbdf 100644 (file)
@@ -1,4 +1,4 @@
 platform-$(CONFIG_MACH_INGENIC)        += jz4740/
 cflags-$(CONFIG_MACH_INGENIC)  += -I$(srctree)/arch/mips/include/asm/mach-jz4740
 load-$(CONFIG_MACH_INGENIC)    += 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC)   += 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC)   += 0xffffffff81000000
index b2509c19cfb5b8cc7185f5cacadfc9f94c59fcbe..d535fc706a8b38a07c8e4c7de12ea111fcd95a60 100644 (file)
@@ -1849,7 +1849,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
                        set_elf_platform(cpu, "loongson3a");
                        set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
-               case PRID_REV_LOONGSON3A_R3:
+               case PRID_REV_LOONGSON3A_R3_0:
+               case PRID_REV_LOONGSON3A_R3_1:
                        c->cputype = CPU_LOONGSON3;
                        __cpu_name[cpu] = "ICT Loongson-3";
                        set_elf_platform(cpu, "loongson3a");
index 505cb77d12805c7d80610abea5c0919320d5ed61..4a1647ddfbd94cc40974a1643f655d6f1da9af2b 100644 (file)
@@ -14,8 +14,6 @@
 
 #include <asm/setup.h>
 
-extern void prom_putchar(char);
-
 static void early_console_write(struct console *con, const char *s, unsigned n)
 {
        while (n-- && *s) {
index 83cea376755697b77adb9208a984ff0e74a7d6d8..ea26614afac60bcc8e909dce162d8240a9ccd718 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
+#include <asm/setup.h>
 
 static void __iomem *serial8250_base;
 static unsigned int serial8250_reg_shift;
index 38a302919e6b5ae8aa07303065bd561529fe828e..d7de8adcfcc8767a826e7823d3bf189326da0e33 100644 (file)
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
        jal     schedule_tail           # a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched and
                                        # signals don't change between
                                        # sampling and return
@@ -141,6 +145,10 @@ work_notifysig:                            # deal with pending signals and
        j       resume_userspace_check
 
 FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched doesn't
                                        # change between sampling and return
        LONG_L  a2, TI_FLAGS($28)       # current->work
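Both syscall-exit paths above now call rseq_syscall() under CONFIG_DEBUG_RSEQ. Paraphrasing kernel/rseq.c from memory - in_cs() below is a stand-in for the real critical-section lookup, not an actual kernel helper:

	/* A task that enters the kernel from inside an rseq critical
	 * section is violating the ABI; the debug build kills it
	 * loudly rather than letting it limp on. */
	void rseq_syscall(struct pt_regs *regs)
	{
		if (current->rseq &&
		    in_cs(current, instruction_pointer(regs)))
			force_sig(SIGSEGV, current);
	}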
index 37b9383eacd3e84872b1d2d7d1361cf1b637522f..6c257b52f57fb00ae9174da336c276d9ba9471f8 100644 (file)
@@ -354,16 +354,56 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
        sll     k0, k0, 30      # Check for SDBBP.
        bgez    k0, ejtag_return
 
+#ifdef CONFIG_SMP
+1:     PTR_LA  k0, ejtag_debug_buffer_spinlock
+       ll      k0, 0(k0)
+       bnez    k0, 1b
+       PTR_LA  k0, ejtag_debug_buffer_spinlock
+       sc      k0, 0(k0)
+       beqz    k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+       sync
+# endif
+
+       PTR_LA  k0, ejtag_debug_buffer
+       LONG_S  k1, 0(k0)
+
+       ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+       PTR_SRL k1, SMP_CPUID_PTRSHIFT
+       PTR_SLL k1, LONGLOG
+       PTR_LA  k0, ejtag_debug_buffer_per_cpu
+       PTR_ADDU k0, k1
+
+       PTR_LA  k1, ejtag_debug_buffer
+       LONG_L  k1, 0(k1)
+       LONG_S  k1, 0(k0)
+
+       PTR_LA  k0, ejtag_debug_buffer_spinlock
+       sw      zero, 0(k0)
+#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)
+#endif
+
        SAVE_ALL
        move    a0, sp
        jal     ejtag_exception_handler
        RESTORE_ALL
+
+#ifdef CONFIG_SMP
+       ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+       PTR_SRL k1, SMP_CPUID_PTRSHIFT
+       PTR_SLL k1, LONGLOG
+       PTR_LA  k0, ejtag_debug_buffer_per_cpu
+       PTR_ADDU k0, k1
+       LONG_L  k1, 0(k0)
+#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_L  k1, 0(k0)
+#endif
 
 ejtag_return:
+       back_to_back_c0_hazard
        MFC0    k0, CP0_DESAVE
        .set    mips32
        deret
@@ -377,6 +417,12 @@ ejtag_return:
        .data
 EXPORT(ejtag_debug_buffer)
        .fill   LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+       .fill   LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+       .fill   LONGSIZE * NR_CPUS
+#endif
        .previous
 
        __INIT
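The debug exception handler cannot take a normal spinlock - only k0/k1 are safe to clobber this early - so the hunk above open-codes one with ll/sc. A C-level rendering of the same protocol (a sketch, not a drop-in):

	static unsigned long ejtag_lock;	/* the _spinlock word */

	static void ejtag_lock_acquire(void)
	{
		do {
			while (READ_ONCE(ejtag_lock))
				;	/* ll ... bnez: wait while held */
		} while (cmpxchg(&ejtag_lock, 0, 1));	/* sc ... beqz */
		smp_llsc_mb();	/* the sync under WEAK_REORDERING_BEYOND_LLSC */
	}

	static void ejtag_lock_release(void)
	{
		WRITE_ONCE(ejtag_lock, 0);	/* sw zero, 0(k0) */
	}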
index 7c246b69c5458344fb1dc7b03c79d54fef53f528..046846999efdb5013a12596b2887b1deaa024549 100644 (file)
 void (*cpu_wait)(void);
 EXPORT_SYMBOL(cpu_wait);
 
-static void r3081_wait(void)
+static void __cpuidle r3081_wait(void)
 {
        unsigned long cfg = read_c0_conf();
        write_c0_conf(cfg | R30XX_CONF_HALT);
        local_irq_enable();
 }
 
-static void r39xx_wait(void)
+static void __cpuidle r39xx_wait(void)
 {
        if (!need_resched())
                write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
        local_irq_enable();
 }
 
-void r4k_wait(void)
+void __cpuidle r4k_wait(void)
 {
        local_irq_enable();
        __r4k_wait();
@@ -60,7 +60,7 @@ void r4k_wait(void)
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-void r4k_wait_irqoff(void)
+void __cpuidle r4k_wait_irqoff(void)
 {
        if (!need_resched())
                __asm__(
@@ -75,7 +75,7 @@ void r4k_wait_irqoff(void)
  * The RM7000 variant has to handle erratum 38.         The workaround is to not
  * have any pending stores when the WAIT instruction is executed.
  */
-static void rm7k_wait_irqoff(void)
+static void __cpuidle rm7k_wait_irqoff(void)
 {
        if (!need_resched())
                __asm__(
@@ -96,7 +96,7 @@ static void rm7k_wait_irqoff(void)
  * since coreclock (and the cp0 counter) stops upon executing it. Only an
  * interrupt can wake it, so they must be enabled before entering idle modes.
  */
-static void au1k_wait(void)
+static void __cpuidle au1k_wait(void)
 {
        unsigned long c0status = read_c0_status() | 1;  /* irqs on */
 
index f5c8bce70db29cb59430cdf0bd46d4fccb80aee8..54cd675c5d1d474153f1d3c6b265bdc1c194b378 100644 (file)
@@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                                preempt_enable_no_resched();
                        }
                        return 1;
-               } else {
-                       if (addr->word != breakpoint_insn.word) {
-                               /*
-                                * The breakpoint instruction was removed by
-                                * another cpu right after we hit, no further
-                                * handling of this interrupt is appropriate
-                                */
-                               ret = 1;
-                               goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs))
-                               goto ss_probe;
+               } else if (addr->word != breakpoint_insn.word) {
+                       /*
+                        * The breakpoint instruction was removed by
+                        * another cpu right after we hit, no further
+                        * handling of this interrupt is appropriate
+                        */
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
        }
 
-ss_probe:
        prepare_singlestep(p, regs, kcb);
        if (kcb->flags & SKIP_DELAYSLOT) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
@@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_sp = regs->regs[29];
-
-       memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
-              MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
-       regs->cp0_epc = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
-       /* Assembler quirk necessitates this '0,code' business.  */
-       asm volatile(
-               "break 0,%0\n\t"
-               ".globl jprobe_return_end\n"
-               "jprobe_return_end:\n"
-               : : "n" (BRK_KPROBE_BP) : "memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (regs->cp0_epc >= (unsigned long)jprobe_return &&
-           regs->cp0_epc <= (unsigned long)jprobe_return_end) {
-               *regs = kcb->jprobe_saved_regs;
-               memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
-                      MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-               preempt_enable_no_resched();
-
-               return 1;
-       }
-       return 0;
-}
-
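This hunk removes the MIPS jprobe machinery (setjmp_pre_handler, jprobe_return, longjmp_break_handler) as part of the tree-wide jprobes removal; former users move to a plain kprobe pre_handler and read arguments from pt_regs. A hedged migration sketch - the handler name and probed symbol are illustrative, and regs->regs[4]/[5] are $a0/$a1 under the MIPS calling conventions:

	#include <linux/kprobes.h>

	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("%s: a0=%lx a1=%lx\n", p->symbol_name,
			regs->regs[4], regs->regs[5]);
		return 0;	/* run the probed instruction as usual */
	}

	static struct kprobe my_probe = {
		.symbol_name	= "_do_fork",
		.pre_handler	= my_pre_handler,
	};
	/* register_kprobe(&my_probe) from module init,
	 * unregister_kprobe(&my_probe) on exit. */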
 /*
  * Function return probe trampoline:
  *     - init_kprobes() establishes a probepoint here
@@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 318f1c05c5b34946dad665852a92ce2b83760cf5..6b61be486303bb1d36bc3b62895f0551a27be8c2 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/mman.h>
 
-/* Use this to get at 32-bit user passed pointers. */
-/* A() macro should be used for places where you e.g.
-   have some internal variable u32 and just want to get
-   rid of a compiler warning. AA() has to be used in
-   places where you want to convert a function argument
-   to 32bit pointer or when you e.g. access pt_regs
-   structure and want to consider 32bit registers only.
- */
-#define A(__x) ((unsigned long)(__x))
-#define AA(__x) ((unsigned long)((int)__x))
-
 #ifdef __MIPSEB__
 #define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
 #endif
 #define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
 #endif
 
-SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
-       unsigned long, prot, unsigned long, flags, unsigned long, fd,
-       unsigned long, pgoff)
-{
-       if (pgoff & (~PAGE_MASK >> 12))
-               return -EINVAL;
-       return ksys_mmap_pgoff(addr, len, prot, flags, fd,
-                              pgoff >> (PAGE_SHIFT-12));
-}
-
-#define RLIM_INFINITY32 0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
-       int     rlim_cur;
-       int     rlim_max;
-};
-
 SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
        unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
 {
index f2ee7e1e3342e498be961f8995fc91b1de1f2744..cff52b283e03843519201ca8fe8754e0899c0c3c 100644 (file)
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
        PTR_LA  t1, ftrace_stub
        PTR_L   t2, ftrace_trace_function /* Prepare t2 for (1) */
-       bne     t1, t2, static_trace
+       beq     t1, t2, fgraph_trace
         nop
 
+       MCOUNT_SAVE_REGS
+
+       move    a0, ra          /* arg1: self return address */
+       jalr    t2              /* (1) call *ftrace_trace_function */
+        move   a1, AT          /* arg2: parent's return address */
+
+       MCOUNT_RESTORE_REGS
+
+fgraph_trace:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       PTR_LA  t1, ftrace_stub
        PTR_L   t3, ftrace_graph_return
        bne     t1, t3, ftrace_graph_caller
         nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
        bne     t1, t3, ftrace_graph_caller
         nop
 #endif
-       b       ftrace_stub
-#ifdef CONFIG_32BIT
-        addiu sp, sp, 8
-#else
-        nop
-#endif
 
-static_trace:
-       MCOUNT_SAVE_REGS
-
-       move    a0, ra          /* arg1: self return address */
-       jalr    t2              /* (1) call *ftrace_trace_function */
-        move   a1, AT          /* arg2: parent's return address */
-
-       MCOUNT_RESTORE_REGS
 #ifdef CONFIG_32BIT
        addiu sp, sp, 8
 #endif
+
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
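The rewrite changes _mcount from "either the static tracer or the graph tracer" to "static tracer first, then fall through to the graph hooks", so both can fire for the same call site. A C-level paraphrase of the new control flow (illustrative; the real code is the assembly above):

	if (ftrace_trace_function != ftrace_stub) {
		/* bracketed by MCOUNT_SAVE_REGS/MCOUNT_RESTORE_REGS */
		ftrace_trace_function(self_ra, parent_ra);
	}
	/* fgraph_trace: previously unreachable when the branch above
	 * was taken */
	if (ftrace_graph_return != ftrace_stub ||
	    ftrace_graph_entry != ftrace_graph_entry_stub)
		ftrace_graph_caller();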
index 8d85046adcc8dd858cb5b392b68dc22da19185c4..8fc69891e1173a91da5e972a5feaadb3e8547775 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
+#include <linux/cpu.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +657,42 @@ unsigned long arch_align_stack(unsigned long sp)
        return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-       struct pt_regs *regs;
+       nmi_cpu_backtrace(get_irq_regs());
+       cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-       regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+       call_single_data_t *csd;
+       int cpu;
 
-       if (regs)
-               show_regs(regs);
+       for_each_cpu(cpu, mask) {
+               /*
+                * If we previously sent an IPI to the target CPU & it hasn't
+                * cleared its bit in the busy cpumask then it didn't handle
+                * our previous IPI & it's not safe for us to reuse the
+                * call_single_data_t.
+                */
+               if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+                       pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+                               cpu);
+                       continue;
+               }
 
-       dump_stack();
+               csd = &per_cpu(backtrace_csd, cpu);
+               csd->func = handle_backtrace;
+               smp_call_function_single_async(cpu, csd);
+       }
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-       long this_cpu = get_cpu();
-
-       if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-               dump_stack();
-
-       smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-       put_cpu();
+       nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
@@ -691,19 +707,25 @@ int mips_get_process_fp_mode(struct task_struct *task)
        return value;
 }
 
-static void prepare_for_fp_mode_switch(void *info)
+static long prepare_for_fp_mode_switch(void *unused)
 {
-       struct mm_struct *mm = info;
-
-       if (current->mm == mm)
-               lose_fpu(1);
+       /*
+        * This is icky, but we use this to simply ensure that all CPUs have
+        * context switched, regardless of whether they were previously running
+        * kernel or user code. This ensures that no CPU currently has its FPU
+        * enabled, or is about to attempt to enable it through any path other
+        * than enable_restore_fp_context() which will wait appropriately for
+        * fp_mode_switching to be zero.
+        */
+       return 0;
 }
 
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
        const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
        struct task_struct *t;
-       int max_users;
+       struct cpumask process_cpus;
+       int cpu;
 
        /* If nothing to change, return right away, successfully.  */
        if (value == mips_get_process_fp_mode(task))
@@ -736,35 +758,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;
 
-       /* Proceed with the mode switch */
-       preempt_disable();
-
-       /* Save FP & vector context, then disable FPU & MSA */
-       if (task->signal == current->signal)
-               lose_fpu(1);
-
-       /* Prevent any threads from obtaining live FP context */
-       atomic_set(&task->mm->context.fp_mode_switching, 1);
-       smp_mb__after_atomic();
-
-       /*
-        * If there are multiple online CPUs then force any which are running
-        * threads in this process to lose their FPU context, which they can't
-        * regain until fp_mode_switching is cleared later.
-        */
-       if (num_online_cpus() > 1) {
-               /* No need to send an IPI for the local CPU */
-               max_users = (task->mm == current->mm) ? 1 : 0;
-
-               if (atomic_read(&current->mm->mm_users) > max_users)
-                       smp_call_function(prepare_for_fp_mode_switch,
-                                         (void *)current->mm, 1);
-       }
-
-       /*
-        * There are now no threads of the process with live FP context, so it
-        * is safe to proceed with the FP mode switch.
-        */
+       /* Indicate the new FP mode in each thread */
        for_each_thread(task, t) {
                /* Update desired FP register width */
                if (value & PR_FP_MODE_FR) {
@@ -781,9 +775,34 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
                        clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
        }
 
-       /* Allow threads to use FP again */
-       atomic_set(&task->mm->context.fp_mode_switching, 0);
-       preempt_enable();
+       /*
+        * We need to ensure that all threads in the process have switched mode
+        * before returning, in order to allow userland to not worry about
+        * races. We can do this by forcing all CPUs that any thread in the
+        * process may be running on to schedule something else - in this case
+        * prepare_for_fp_mode_switch().
+        *
+        * We begin by generating a mask of all CPUs that any thread in the
+        * process may be running on.
+        */
+       cpumask_clear(&process_cpus);
+       for_each_thread(task, t)
+               cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+       /*
+        * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+        *
+        * The CPUs may have rescheduled already since we switched mode or
+        * generated the cpumask, but that doesn't matter. If the task in this
+        * process is scheduled out then our scheduling
+        * prepare_for_fp_mode_switch() will simply be redundant. If it's
+        * scheduled in then it will already have picked up the new FP mode
+        * whilst doing so.
+        */
+       get_online_cpus();
+       for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+               work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+       put_online_cpus();
 
        wake_up_var(&task->mm->context.fp_mode_switching);
 
index 9f6c3f2aa2e2eff85e31480fee4e765b628364b2..e5ba56c01ee0a88b090507e6d9bb6cb08f7b13ba 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/mipsmtregs.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <asm/syscall.h>
 #include <linux/uaccess.h>
 #include <asm/bootinfo.h>
@@ -589,9 +590,226 @@ static int fpr_set(struct task_struct *target,
        return err;
 }
 
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+                    const struct user_regset *regset,
+                    unsigned int pos, unsigned int count,
+                    void *kbuf, void __user *ubuf)
+{
+       unsigned int start, num_regs, i;
+       u32 dspregs[NUM_DSP_REGS + 1];
+
+       BUG_ON(count % sizeof(u32));
+
+       if (!cpu_has_dsp)
+               return -EIO;
+
+       start = pos / sizeof(u32);
+       num_regs = count / sizeof(u32);
+
+       if (start + num_regs > NUM_DSP_REGS + 1)
+               return -EIO;
+
+       for (i = start; i < num_regs; i++)
+               switch (i) {
+               case 0 ... NUM_DSP_REGS - 1:
+                       dspregs[i] = target->thread.dsp.dspr[i];
+                       break;
+               case NUM_DSP_REGS:
+                       dspregs[i] = target->thread.dsp.dspcontrol;
+                       break;
+               }
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+                                  sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+                    const struct user_regset *regset,
+                    unsigned int pos, unsigned int count,
+                    const void *kbuf, const void __user *ubuf)
+{
+       unsigned int start, num_regs, i;
+       u32 dspregs[NUM_DSP_REGS + 1];
+       int err;
+
+       BUG_ON(count % sizeof(u32));
+
+       if (!cpu_has_dsp)
+               return -EIO;
+
+       start = pos / sizeof(u32);
+       num_regs = count / sizeof(u32);
+
+       if (start + num_regs > NUM_DSP_REGS + 1)
+               return -EIO;
+
+       err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+                                sizeof(dspregs));
+       if (err)
+               return err;
+
+       for (i = start; i < num_regs; i++)
+               switch (i) {
+               case 0 ... NUM_DSP_REGS - 1:
+                       target->thread.dsp.dspr[i] = (s32)dspregs[i];
+                       break;
+               case NUM_DSP_REGS:
+                       target->thread.dsp.dspcontrol = (s32)dspregs[i];
+                       break;
+               }
+
+       return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+                    const struct user_regset *regset,
+                    unsigned int pos, unsigned int count,
+                    void *kbuf, void __user *ubuf)
+{
+       unsigned int start, num_regs, i;
+       u64 dspregs[NUM_DSP_REGS + 1];
+
+       BUG_ON(count % sizeof(u64));
+
+       if (!cpu_has_dsp)
+               return -EIO;
+
+       start = pos / sizeof(u64);
+       num_regs = count / sizeof(u64);
+
+       if (start + num_regs > NUM_DSP_REGS + 1)
+               return -EIO;
+
+       for (i = start; i < num_regs; i++)
+               switch (i) {
+               case 0 ... NUM_DSP_REGS - 1:
+                       dspregs[i] = target->thread.dsp.dspr[i];
+                       break;
+               case NUM_DSP_REGS:
+                       dspregs[i] = target->thread.dsp.dspcontrol;
+                       break;
+               }
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+                                  sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+                    const struct user_regset *regset,
+                    unsigned int pos, unsigned int count,
+                    const void *kbuf, const void __user *ubuf)
+{
+       unsigned int start, num_regs, i;
+       u64 dspregs[NUM_DSP_REGS + 1];
+       int err;
+
+       BUG_ON(count % sizeof(u64));
+
+       if (!cpu_has_dsp)
+               return -EIO;
+
+       start = pos / sizeof(u64);
+       num_regs = count / sizeof(u64);
+
+       if (start + num_regs > NUM_DSP_REGS + 1)
+               return -EIO;
+
+       err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+                                sizeof(dspregs));
+       if (err)
+               return err;
+
+       for (i = start; i < num_regs; i++)
+               switch (i) {
+               case 0 ... NUM_DSP_REGS - 1:
+                       target->thread.dsp.dspr[i] = dspregs[i];
+                       break;
+               case NUM_DSP_REGS:
+                       target->thread.dsp.dspcontrol = dspregs[i];
+                       break;
+               }
+
+       return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+                     const struct user_regset *regset)
+{
+       return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
+static int fp_mode_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
+{
+       int fp_mode;
+
+       fp_mode = mips_get_process_fp_mode(target);
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+                                  sizeof(fp_mode));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
+{
+       int fp_mode;
+       int err;
+
+       BUG_ON(count % sizeof(int));
+
+       if (pos + count > sizeof(fp_mode))
+               return -EIO;
+
+       err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+                                sizeof(fp_mode));
+       if (err)
+               return err;
+
+       if (count > 0)
+               err = mips_set_process_fp_mode(target, fp_mode);
+
+       return err;
+}
+
 enum mips_regset {
        REGSET_GPR,
        REGSET_FPR,
+       REGSET_DSP,
+       REGSET_FP_MODE,
 };
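Tracer-side, the new regsets are reached through PTRACE_GETREGSET / PTRACE_SETREGSET using the NT_MIPS_DSP and NT_MIPS_FP_MODE note types this series adds to <linux/elf.h> (define them locally when building against older headers). A hedged userspace sketch for the 32-bit view - seven u32 slots, the six extra DSP accumulator registers plus DSPControl:

	#include <elf.h>
	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	static long read_dsp(pid_t pid, uint32_t dsp[7])
	{
		struct iovec iov = {
			.iov_base = dsp,
			.iov_len  = 7 * sizeof(uint32_t),
		};

		/* fails on CPUs without the DSP ASE, see dsp_active()
		 * and dsp32_get() above */
		return ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov);
	}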
 
 struct pt_regs_offset {
@@ -697,6 +915,23 @@ static const struct user_regset mips_regsets[] = {
                .get            = fpr_get,
                .set            = fpr_set,
        },
+       [REGSET_DSP] = {
+               .core_note_type = NT_MIPS_DSP,
+               .n              = NUM_DSP_REGS + 1,
+               .size           = sizeof(u32),
+               .align          = sizeof(u32),
+               .get            = dsp32_get,
+               .set            = dsp32_set,
+               .active         = dsp_active,
+       },
+       [REGSET_FP_MODE] = {
+               .core_note_type = NT_MIPS_FP_MODE,
+               .n              = 1,
+               .size           = sizeof(int),
+               .align          = sizeof(int),
+               .get            = fp_mode_get,
+               .set            = fp_mode_set,
+       },
 };
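Likewise, a hedged sketch of reading the o32 DSP regset from a debugger.
NUM_DSP_REGS is 6 in this era's asm/dsp.h and NT_MIPS_DSP is assumed to be
0x800 from the uapi headers; on CPUs without the DSP ASE the dsp32_get()
handler returns -EIO, which ptrace() reports as errno == EIO:

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #define NT_MIPS_DSP     0x800   /* assumed uapi value */
    #define DSP_NREGS       7       /* 6 dspr entries + DSPControl */

    /* Returns 1 on success, 0 if the CPU has no DSP ASE, -1 on error. */
    static int read_dsp32(pid_t pid, uint32_t regs[DSP_NREGS])
    {
            struct iovec iov = {
                    .iov_base = regs,
                    .iov_len  = DSP_NREGS * sizeof(uint32_t),
            };

            if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_MIPS_DSP, &iov) < 0)
                    return errno == EIO ? 0 : -1;
            return 1;
    }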
 
 static const struct user_regset_view user_mips_view = {
@@ -728,6 +963,23 @@ static const struct user_regset mips64_regsets[] = {
                .get            = fpr_get,
                .set            = fpr_set,
        },
+       [REGSET_DSP] = {
+               .core_note_type = NT_MIPS_DSP,
+               .n              = NUM_DSP_REGS + 1,
+               .size           = sizeof(u64),
+               .align          = sizeof(u64),
+               .get            = dsp64_get,
+               .set            = dsp64_set,
+               .active         = dsp_active,
+       },
+       [REGSET_FP_MODE] = {
+               .core_note_type = NT_MIPS_FP_MODE,
+               .n              = 1,
+               .size           = sizeof(int),
+               .align          = sizeof(int),
+               .get            = fp_mode_get,
+               .set            = fp_mode_set,
+       },
 };
 
 static const struct user_regset_view user_mips64_view = {
@@ -856,7 +1108,7 @@ long arch_ptrace(struct task_struct *child, long request,
                                goto out;
                        }
                        dregs = __get_dsp_regs(child);
-                       tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+                       tmp = dregs[addr - DSP_BASE];
                        break;
                }
                case DSP_CONTROL:
index 7edc629304c8fe1a7b53b6792486fc2c147b7973..bc348d44d151705770c9e4c1275fe0ed750a7050 100644 (file)
@@ -142,7 +142,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                                goto out;
                        }
                        dregs = __get_dsp_regs(child);
-                       tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+                       tmp = dregs[addr - DSP_BASE];
                        break;
                }
                case DSP_CONTROL:
index c6bbf21650515d1e71eead45b41a7729f8794476..419c92197b2f81e22385806144cfe3fb19570a95 100644 (file)
@@ -85,7 +85,7 @@ done:
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* We need to flush I-cache before jumping to new kernel.
-        * Unfortunatelly, this code is cpu-specific.
+        * Unfortunately, this code is cpu-specific.
         */
        .set push
        .set noreorder
@@ -145,7 +145,7 @@ LEAF(kexec_smp_wait)
 #endif
 
 /* All parameters to new kernel are passed in registers a0-a3.
- * kexec_args[0..3] are uses to prepare register values.
+ * kexec_args[0..3] are used to prepare register values.
  */
 
 kexec_args:
index a9a7d78803cde30097a02c76aa49bef9f812be7e..91d3c8c46097cd960fd541cdf7c76d7e0d3636e3 100644 (file)
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
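The o32 numbers for the two new entries follow from the `/* 4365 */' comment
above (sys_pkey_free): sys_statx is 4366, so sys_rseq lands at 4367 and
sys_io_pgetevents at 4368 (o32 syscalls are numbered from a 4000 base). A
hedged sketch for userspace that has no libc wrapper yet:

    #include <unistd.h>

    /* Derived from the table above; assumed, double-check against the
     * installed asm/unistd.h. */
    #define __NR_rseq            4367
    #define __NR_io_pgetevents   4368

    /* e.g.: syscall(__NR_rseq, rseq_ptr, rseq_len, flags, sig); */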
index 65d5aeeb9bdb51ac846d5acc213f3a1af9b97533..358d9599983d17840cd909ea7851197e7b38b838 100644 (file)
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 5325 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
        .size   sys_call_table,.-sys_call_table
index cbf190ef9e8a5e2a0e499cfaf721908abf4213ec..c65eaacc1abfcf4c15a40721056cf6c3503927ee 100644 (file)
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free
        PTR     sys_statx                       /* 6330 */
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sysn32_call_table,.-sysn32_call_table
index 9ebe3e2403b1d7b84d66732cd261364208f6020d..73913f072e3916f36c23bda86870f83002a725c0 100644 (file)
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sys32_call_table,.-sys32_call_table
index 2c96c0c68116252e2965a4d4ff395a71603779fd..c71d1eb7da5944b182c287aae347488a7594529b 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/cdmm.h>
 #include <asm/cpu.h>
 #include <asm/debug.h>
+#include <asm/dma-coherence.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp-ops.h>
@@ -84,6 +85,11 @@ static struct resource bss_resource = { .name = "Kernel bss", };
 
 static void *detect_magic __initdata = detect_memory_region;
 
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
 void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 {
        int x = boot_mem_map.nr_map;
@@ -441,6 +447,12 @@ static void __init bootmem_init(void)
                mapstart = max(reserved_end, start);
        }
 
+       if (min_low_pfn >= max_low_pfn)
+               panic("Incorrect memory mapping !!!");
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+       ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
        /*
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
@@ -448,8 +460,6 @@ static void __init bootmem_init(void)
                add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
                                  BOOT_MEM_RESERVED);
 
-       if (min_low_pfn >= max_low_pfn)
-               panic("Incorrect memory mapping !!!");
        if (min_low_pfn > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
@@ -459,6 +469,7 @@ static void __init bootmem_init(void)
                        ARCH_PFN_OFFSET - min_low_pfn);
        }
        min_low_pfn = ARCH_PFN_OFFSET;
+#endif
 
        /*
         * Determine low and high memory ranges
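With CONFIG_MIPS_AUTO_PFN_OFFSET the offset is computed at boot rather than
configured, so memory maps that start high no longer waste struct page
entries. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12, which is
an assumption and not fixed by this hunk):

    /* ramstart = 0x80200000 (illustrative)                          */
    /* ARCH_PFN_OFFSET = PFN_UP(0x80200000)                          */
    /*                 = (0x80200000 + 0xfff) >> 12 = 0x80200        */
    /* min_low_pfn == ARCH_PFN_OFFSET, so the "Wasting %lu bytes ... */
    /* unused pages" branch below no longer triggers.                */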
@@ -1055,3 +1066,26 @@ static int __init debugfs_mips(void)
 }
 arch_initcall(debugfs_mips);
 #endif
+
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = IO_COHERENCE_ENABLED;
+       pr_info("Hardware DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+       coherentio = IO_COHERENCE_DISABLED;
+       pr_info("Software DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
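These two early_param() hooks move here from the deleted dma-default.c (see
further down in this series) and keep the existing command-line interface:
`coherentio' forces hardware-maintained DMA coherency, `nocoherentio' forces
software cache maintenance, and in the default case the hw_coherentio probe
result wins. Illustrative boot arguments (not from this patch):

    console=ttyS0,115200 nocoherentio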
index 9e224469c78887e9c2eb55779bfc8d4646ca2f09..109ed163a6a6aeaa25015c737052ced450924f00 100644 (file)
@@ -592,13 +592,15 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
 #endif
 
 #ifdef CONFIG_TRAD_SIGNALS
-asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_sigreturn(void)
 {
        struct sigframe __user *frame;
+       struct pt_regs *regs;
        sigset_t blocked;
        int sig;
 
-       frame = (struct sigframe __user *) regs.regs[29];
+       regs = current_pt_regs();
+       frame = (struct sigframe __user *)regs->regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
@@ -606,7 +608,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        set_current_blocked(&blocked);
 
-       sig = restore_sigcontext(&regs, &frame->sf_sc);
+       sig = restore_sigcontext(regs, &frame->sf_sc);
        if (sig < 0)
                goto badframe;
        else if (sig)
@@ -618,8 +620,8 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
-               :/* no outputs */
-               :"r" (&regs));
+               : /* no outputs */
+               : "r" (regs));
        /* Unreached */
 
 badframe:
@@ -627,13 +629,15 @@ badframe:
 }
 #endif /* CONFIG_TRAD_SIGNALS */
 
-asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(void)
 {
        struct rt_sigframe __user *frame;
+       struct pt_regs *regs;
        sigset_t set;
        int sig;
 
-       frame = (struct rt_sigframe __user *) regs.regs[29];
+       regs = current_pt_regs();
+       frame = (struct rt_sigframe __user *)regs->regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
@@ -641,7 +645,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        set_current_blocked(&set);
 
-       sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+       sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
@@ -656,8 +660,8 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
-               :/* no outputs */
-               :"r" (&regs));
+               : /* no outputs */
+               : "r" (regs));
        /* Unreached */
 
 badframe:
@@ -801,6 +805,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                regs->regs[0] = 0;              /* Don't deal with this again.  */
        }
 
+       rseq_signal_deliver(ksig, regs);
+
        if (sig_uses_siginfo(&ksig->ka, abi))
                ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
                                          ksig, regs, oldset);
@@ -868,6 +874,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
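The two rseq_*() calls wire MIPS into the generic restartable-sequences
machinery whose syscall entries were added above. A hedged sketch of the
userspace side; the syscall number is the o32 value derived earlier and the
signature is the one used by the kernel's rseq selftests, both assumptions
rather than part of this hunk:

    #define _GNU_SOURCE
    #include <linux/rseq.h>
    #include <unistd.h>

    static __thread struct rseq rs __attribute__((aligned(32)));

    static int register_rseq(void)
    {
            /* rseq(rseq, rseq_len, flags, sig) */
            return syscall(4367 /* assumed o32 __NR_rseq */,
                           &rs, sizeof(rs), 0,
                           0x53053053 /* RSEQ_SIG per selftests */);
    }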
index b672cebb4a1a507144cfc85964e4b39ac5ee70fa..8f65aaf9206d1ba88ab0f68e580eb176aacd173e 100644 (file)
@@ -64,13 +64,15 @@ struct rt_sigframe_n32 {
        struct ucontextn32 rs_uc;
 };
 
-asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sysn32_rt_sigreturn(void)
 {
        struct rt_sigframe_n32 __user *frame;
+       struct pt_regs *regs;
        sigset_t set;
        int sig;
 
-       frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
+       regs = current_pt_regs();
+       frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -78,7 +80,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        set_current_blocked(&set);
 
-       sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+       sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
@@ -93,8 +95,8 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
-               :/* no outputs */
-               :"r" (&regs));
+               : /* no outputs */
+               : "r" (regs));
        /* Unreached */
 
 badframe:
index 2b3572fb5f1b9d7ccd308e931078fe31fb4b924a..b6e3ddef48a06f9a543a10edc3c96e5ce1116b2b 100644 (file)
@@ -151,13 +151,15 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig,
        return 0;
 }
 
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_rt_sigreturn(void)
 {
        struct rt_sigframe32 __user *frame;
+       struct pt_regs *regs;
        sigset_t set;
        int sig;
 
-       frame = (struct rt_sigframe32 __user *) regs.regs[29];
+       regs = current_pt_regs();
+       frame = (struct rt_sigframe32 __user *)regs->regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -165,7 +167,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        set_current_blocked(&set);
 
-       sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+       sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
@@ -180,8 +182,8 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
-               :/* no outputs */
-               :"r" (&regs));
+               : /* no outputs */
+               : "r" (regs));
        /* Unreached */
 
 badframe:
@@ -251,13 +253,15 @@ struct mips_abi mips_abi_32 = {
 };
 
 
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_sigreturn(void)
 {
        struct sigframe32 __user *frame;
+       struct pt_regs *regs;
        sigset_t blocked;
        int sig;
 
-       frame = (struct sigframe32 __user *) regs.regs[29];
+       regs = current_pt_regs();
+       frame = (struct sigframe32 __user *)regs->regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
@@ -265,7 +269,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        set_current_blocked(&blocked);
 
-       sig = restore_sigcontext32(&regs, &frame->sf_sc);
+       sig = restore_sigcontext32(regs, &frame->sf_sc);
        if (sig < 0)
                goto badframe;
        else if (sig)
@@ -277,8 +281,8 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
-               :/* no outputs */
-               :"r" (&regs));
+               : /* no outputs */
+               : "r" (regs));
        /* Unreached */
 
 badframe:
index d67fa74622ee287200bf6b6664c3292ad72131d5..f8871d5b7eb3e863bc5abf6b77dbd2f04fdd877d 100644 (file)
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
        __show_regs((struct pt_regs *)regs);
+       dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
@@ -1220,13 +1221,6 @@ static int enable_restore_fp_context(int msa)
 {
        int err, was_fpu_owner, prior_msa;
 
-       /*
-        * If an FP mode switch is currently underway, wait for it to
-        * complete before proceeding.
-        */
-       wait_var_event(&current->mm->context.fp_mode_switching,
-                      !atomic_read(&current->mm->context.fp_mode_switching));
-
        if (!used_math()) {
                /* First time FP context user. */
                preempt_disable();
index 7cd76f93a438ab00d7085b3aaa93a5294c494c55..f7ea8e21656b168fbd16a57a46851c0ab78b0129 100644 (file)
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
        dvcpu->arch.wait = 0;
 
        if (swq_has_sleeper(&dvcpu->wq))
-               swake_up(&dvcpu->wq);
+               swake_up_one(&dvcpu->wq);
 
        return 0;
 }
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
 
        vcpu->arch.wait = 0;
        if (swq_has_sleeper(&vcpu->wq))
-               swake_up(&vcpu->wq);
+               swake_up_one(&vcpu->wq);
 }
 
 /* low level hrtimer wake routine */
index 44bccaee822b1f6ebcb827f039cc73eab2b7f634..c4aa140b7c919830afec124c9595055a43ab0295 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/cpu.h>
 #include <lantiq_soc.h>
+#include <asm/setup.h>
 
 #define ASC_BUF                1024
 #define LTQ_ASC_FSTAT  ((u32 *)(LTQ_EARLY_ASC + 0x0048))
index 9ff7ccde9de0e8d9899018d3d9152e4a243408e8..d984bd5c2ec5fa24c4e966311a2504962ffd4963 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/export.h>
 #include <linux/clk.h>
 #include <linux/bootmem.h>
-#include <linux/of_platform.h>
 #include <linux/of_fdt.h>
 
 #include <asm/bootinfo.h>
@@ -114,10 +113,3 @@ void __init prom_init(void)
                panic("failed to register_vsmp_smp_ops()");
 #endif
 }
-
-int __init plat_of_setup(void)
-{
-       return of_platform_default_populate(NULL, NULL, NULL);
-}
-
-arch_initcall(plat_of_setup);
index 805b3a6ab2d60c95c8c8a13477cfd21f9bc616e4..4b9fbb6744adecfe17fdba69649e8f95f875dfcb 100644 (file)
@@ -130,10 +130,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
        unsigned long flags;
 
        ch->desc = 0;
-       ch->desc_base = dma_alloc_coherent(NULL,
+       ch->desc_base = dma_zalloc_coherent(NULL,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
-       memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
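The explicit memset() can go because dma_zalloc_coherent() zeroes the
allocation itself; around this time it is roughly equivalent to the following
generic wrapper (a sketch, not code from this patch):

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_alloc_coherent(dev, size, dma_handle,
                                      flag | __GFP_ZERO);
    }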
index 17e15b50a551fbaa75891ef626e0daf873dae7cf..37b8fc5b9ac9e82da57918d6eff5e0872d276e72 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/bootinfo.h>
 #include <asm/lasat/lasat.h>
 #include <asm/cpu.h>
+#include <asm/setup.h>
 
 #include "at93c.h"
 #include <asm/lasat/eeprom.h>
index 1cc306520a55be39d316b6b8ec98d09519a14aa5..3a6f34ef5ffc38edf71fe616b2888af9b6dc6f30 100644 (file)
 #endif
 #else
         PTR_SUBU       t0, $0, a2
+       move            a2, zero                /* No remaining longs */
        PTR_ADDIU       t0, 1
        STORE_BYTE(0)
        STORE_BYTE(1)
 
 #ifdef CONFIG_CPU_MIPSR6
 .Lbyte_fixup\@:
-       PTR_SUBU        a2, $0, t0
+       /*
+        * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
+        *      a2     =             a2                -              t0                   + 1
+        */
+       PTR_SUBU        a2, t0
        jr              ra
         PTR_ADDIU      a2, 1
 #endif /* CONFIG_CPU_MIPSR6 */
 
 .Lfirst_fixup\@:
+       /* unset_bytes already in a2 */
        jr      ra
         nop
 
 .Lfwd_fixup\@:
+       /*
+        * unset_bytes = partial_start_addr +  #bytes   -     fault_addr
+        *      a2     =         t1        + (a2 & 0x3f) - $28->task->BUADDR
+        */
        PTR_L           t0, TI_TASK($28)
        andi            a2, 0x3f
        LONG_L          t0, THREAD_BUADDR(t0)
         LONG_SUBU      a2, t0
 
 .Lpartial_fixup\@:
+       /*
+        * unset_bytes = partial_end_addr +      #bytes     -     fault_addr
+        *      a2     =       a0         + (a2 & STORMASK) - $28->task->BUADDR
+        */
        PTR_L           t0, TI_TASK($28)
        andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
         LONG_SUBU      a2, t0
 
 .Llast_fixup\@:
+       /* unset_bytes already in a2 */
        jr              ra
         nop
 
 .Lsmall_fixup\@:
+       /*
+        * unset_bytes = end_addr - current_addr + 1
+        *      a2     =    t1    -      a0      + 1
+        */
        PTR_SUBU        a2, t1, a0
        jr              ra
         PTR_ADDIU      a2, 1
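A C model of the arithmetic the new comments document, using .Lpartial_fixup
as the example: the TLB fault handler records the faulting user address in
thread.bad_uaddr (BUADDR), and the fixup returns how many bytes were left
unset. A sketch under those assumptions:

    /* Mirrors: a2 = a0 + (a2 & STORMASK) - BUADDR */
    static unsigned long partial_fixup(unsigned long a0,  /* current dst */
                                       unsigned long a2,  /* bytes left  */
                                       unsigned long stormask,
                                       unsigned long buaddr /* fault addr */)
    {
            return a0 + (a2 & stormask) - buaddr;
    }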
index ffe01c6d0037db032e84344d9eb4682c9d081fb4..a0dbb3b2f2de389f1b83d89b517b1a35c2d312e7 100644 (file)
@@ -1,8 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
-       $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-       -Wa,-mips32r2 -Wa,--trap
-
+cflags-$(CONFIG_CPU_LOONGSON1)         += -march=mips32 -Wa,--trap
 platform-$(CONFIG_MACH_LOONGSON32)     += loongson32/
 cflags-$(CONFIG_MACH_LOONGSON32)       += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_LOONGSON1_LS1B)          += 0xffffffff80100000
-load-$(CONFIG_LOONGSON1_LS1C)          += 0xffffffff80100000
+load-$(CONFIG_CPU_LOONGSON1)           += 0xffffffff80100000
index c79e6a565572f35668608e877fece23f9f3c0f46..c865b4b9b77503b6a8ae6ce4d8cad25c26ef41ad 100644 (file)
@@ -91,7 +91,6 @@ config LOONGSON_MACH3X
        select LOONGSON_MC146818
        select ZONE_DMA32
        select LEFI_FIRMWARE_INTERFACE
-       select PHYS48_TO_HT40
        help
                Generic Loongson 3 family machines utilize the 3A/3B revision
                of Loongson processor and RS780/SBX00 chipset.
@@ -130,10 +129,6 @@ config LOONGSON_UART_BASE
        default y
        depends on EARLY_PRINTK || SERIAL_8250
 
-config PHYS48_TO_HT40
-       bool
-       default y if CPU_LOONGSON3
-
 config LOONGSON_MC146818
        bool
        default n
index 8235ac7eac95f09309b6526e2ffada05c27e0db6..57ee0302294186c6fcc0e2db3ccf97226fffb2fc 100644 (file)
@@ -6,6 +6,7 @@
 obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
     bonito-irq.o mem.o machtype.o platform.o serial.o
 obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_CPU_LOONGSON2) += dma.o
 
 #
 # Serial port support
@@ -25,8 +26,3 @@ obj-$(CONFIG_CS5536) += cs5536/
 #
 
 obj-$(CONFIG_SUSPEND) += pm.o
-
-#
-# Big Memory (SWIOTLB) Support
-#
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
index f7c905e50dc415e21eb258b08a69cc61525bd67b..92dc6bafc1271795b3c66b7c21467f3c22214fd0 100644 (file)
@@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg)
                break;
        case PCI_OHCI_INT_REG:
                _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
-               if ((lo & 0x00000f00) == CS5536_USB_INTR)
+               if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
                        conf_data = 1;
                break;
        default:
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
deleted file mode 100644 (file)
index 6a739f8..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <boot_param.h>
-#include <dma-coherence.h>
-
-static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-       void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
-       mb();
-       return ret;
-}
-
-static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
-                               unsigned long offset, size_t size,
-                               enum dma_data_direction dir,
-                               unsigned long attrs)
-{
-       dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
-                                       dir, attrs);
-       mb();
-       return daddr;
-}
-
-static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction dir,
-                               unsigned long attrs)
-{
-       int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
-       mb();
-
-       return r;
-}
-
-static void loongson_dma_sync_single_for_device(struct device *dev,
-                               dma_addr_t dma_handle, size_t size,
-                               enum dma_data_direction dir)
-{
-       swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
-       mb();
-}
-
-static void loongson_dma_sync_sg_for_device(struct device *dev,
-                               struct scatterlist *sg, int nents,
-                               enum dma_data_direction dir)
-{
-       swiotlb_sync_sg_for_device(dev, sg, nents, dir);
-       mb();
-}
-
-static int loongson_dma_supported(struct device *dev, u64 mask)
-{
-       if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
-               return 0;
-       return swiotlb_dma_supported(dev, mask);
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-       long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
-       /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
-        * Loongson-3's 48bit address space and embed it into 40bit */
-       nid = (paddr >> 44) & 0x3;
-       paddr = ((nid << 44) ^ paddr) | (nid << 37);
-#endif
-       return paddr;
-}
-
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-       long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
-       /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
-        * Loongson-3's 48bit address space and embed it into 40bit */
-       nid = (daddr >> 37) & 0x3;
-       daddr = ((nid << 37) ^ daddr) | (nid << 44);
-#endif
-       return daddr;
-}
-
-static const struct dma_map_ops loongson_dma_map_ops = {
-       .alloc = loongson_dma_alloc_coherent,
-       .free = swiotlb_free,
-       .map_page = loongson_dma_map_page,
-       .unmap_page = swiotlb_unmap_page,
-       .map_sg = loongson_dma_map_sg,
-       .unmap_sg = swiotlb_unmap_sg_attrs,
-       .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-       .sync_single_for_device = loongson_dma_sync_single_for_device,
-       .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-       .sync_sg_for_device = loongson_dma_sync_sg_for_device,
-       .mapping_error = swiotlb_dma_mapping_error,
-       .dma_supported = loongson_dma_supported,
-};
-
-void __init plat_swiotlb_setup(void)
-{
-       swiotlb_init(1);
-       mips_dma_map_ops = &loongson_dma_map_ops;
-}
diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/common/dma.c
new file mode 100644 (file)
index 0000000..48f0412
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       return paddr | 0x80000000;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+       if (dma_addr > 0x8fffffff)
+               return dma_addr;
+       return dma_addr & 0x0fffffff;
+#else
+       return dma_addr & 0x7fffffff;
+#endif
+}
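Sanity-checking the new helpers with concrete values (illustrative addresses,
not from the patch):

    /* __phys_to_dma(dev, 0x00ff0000) == 0x80ff0000 (bit 31 set)      */
    /*                                                                */
    /* Loongson-2F/64BIT decode:                                      */
    /*   __dma_to_phys(dev, 0x80ff0000) == 0x00ff0000 (& 0x0fffffff)  */
    /*   __dma_to_phys(dev, 0x90000000) == 0x90000000 (passthrough)   */
    /* Other CPUs:                                                    */
    /*   __dma_to_phys(dev, 0x80ff0000) == 0x00ff0000 (& 0x7fffffff)  */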
index 6ca632e529dc265f1014b5502ce52302f3f64dcb..a782e2b247475f6be6836de609f6f79230a91e96 100644 (file)
@@ -10,6 +10,7 @@
  *  option) any later version.
  */
 #include <linux/serial_reg.h>
+#include <asm/setup.h>
 
 #include <loongson.h>
 
index 1e8a955ae5a820e37a5bdb23e970e6356693ccb9..8f68ee02a8c2447439c9b821f7f9835155333ac8 100644 (file)
@@ -198,7 +198,8 @@ void __init prom_init_env(void)
                        break;
                case PRID_REV_LOONGSON3A_R1:
                case PRID_REV_LOONGSON3A_R2:
-               case PRID_REV_LOONGSON3A_R3:
+               case PRID_REV_LOONGSON3A_R3_0:
+               case PRID_REV_LOONGSON3A_R3_1:
                        cpu_clock_freq = 900000000;
                        break;
                case PRID_REV_LOONGSON3B_R1:
index 44bc1482158bf2b335b3784bafb87f5cda7ba6a1..b5a0c2fa5446335e292268f32601a31114f15d7a 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Makefile for Loongson-3 family machines
 #
-obj-y                  += irq.o cop2-ex.o platform.o acpi_init.o
+obj-y                  += irq.o cop2-ex.o platform.o acpi_init.o dma.o
 
 obj-$(CONFIG_SMP)      += smp.o
 
diff --git a/arch/mips/loongson64/loongson-3/dma.c b/arch/mips/loongson64/loongson-3/dma.c
new file mode 100644 (file)
index 0000000..5e86635
--- /dev/null
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+#include <linux/init.h>
+#include <linux/swiotlb.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       /* Extract the 2-bit node id (bits 44~45 of the 44~47 field) from
+        * Loongson-3's 48-bit address space and embed it at bits 37~38. */
+       long nid = (paddr >> 44) & 0x3;
+       return ((nid << 44) ^ paddr) | (nid << 37);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+       /* Recover the 2-bit node id from bits 37~38 of the 40-bit DMA
+        * address and restore it to bits 44~45 of the physical address. */
+       long nid = (daddr >> 37) & 0x3;
+       return ((nid << 37) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+       swiotlb_init(1);
+}
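A worked example of the node-id shuffle (illustrative addresses):

    /* paddr on node 1: bit 44 set                                    */
    /*   paddr = 0x0000100000001000                                   */
    /*   nid   = (paddr >> 44) & 0x3 == 1                             */
    /*   dma   = ((1UL << 44) ^ paddr) | (1UL << 37)                  */
    /*         = 0x0000002000001000   (node id now at bits 37..38)    */
    /* __dma_to_phys() applies the inverse and restores bit 44.       */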
index 8501109bb0f0f5bb70f7a4907431ff0c54c13e65..fea95d00326912ede09aba8f2c54d76ad9cfefcc 100644 (file)
@@ -682,7 +682,8 @@ void play_dead(void)
                        (void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
                break;
        case PRID_REV_LOONGSON3A_R2:
-       case PRID_REV_LOONGSON3A_R3:
+       case PRID_REV_LOONGSON3A_R3_0:
+       case PRID_REV_LOONGSON3A_R3_1:
                play_dead_at_ckseg1 =
                        (void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
                break;
index c463bdad45c799a422eb94cc62ecd2fa79c61268..3e5bb203c95ac6d3fc094cd80214c4bc38240359 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the Linux/MIPS-specific parts of the memory manager.
 #
 
-obj-y                          += cache.o dma-default.o extable.o fault.o \
+obj-y                          += cache.o extable.o fault.o \
                                   gup.o init.o mmap.o page.o page-funcs.o \
                                   pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
 
@@ -17,6 +17,7 @@ obj-$(CONFIG_32BIT)           += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
+obj-$(CONFIG_DMA_NONCOHERENT)  += dma-noncoherent.o
 
 obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_R3000)                += c-r3k.o tlb-r3k.o
index e12dfa48b478dd3ec51369236bb84040c044bd82..a9ef057c79fe4a23e0f27d2b54ba6b97e1a41df0 100644 (file)
@@ -830,12 +830,13 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
        return __r4k_flush_icache_range(start, end, true);
 }
 
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
        /* Catch bad driver code */
-       BUG_ON(size == 0);
+       if (WARN_ON(size == 0))
+               return;
 
        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 {
        /* Catch bad driver code */
-       BUG_ON(size == 0);
+       if (WARN_ON(size == 0))
+               return;
 
        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
@@ -904,7 +906,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
        bc_inv(addr, size);
        __sync();
 }
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
 
 struct flush_cache_sigtramp_args {
        struct mm_struct *mm;
@@ -1505,6 +1507,14 @@ static void probe_pcache(void)
        if (c->dcache.flags & MIPS_CACHE_PINDEX)
                c->dcache.flags &= ~MIPS_CACHE_ALIASES;
 
+       /*
+        * In systems with CM the icache fills from L2 or closer caches, and
+        * thus sees remote stores without needing to write them back any
+        * further than that.
+        */
+       if (mips_cm_present())
+               c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
+
        switch (current_cpu_type()) {
        case CPU_20KC:
                /*
index 0d3c656feba046508dd99bb403e7aaa6da85622e..70a523151ff39dfa41b330f03c1dd1330838b4f5 100644 (file)
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
 EXPORT_SYMBOL(flush_icache_all);
 
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
 
 /* DMA cache operations. */
 void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -65,7 +65,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
 EXPORT_SYMBOL(_dma_cache_wback_inv);
 
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
 
 /*
  * We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
deleted file mode 100644 (file)
index f9fef00..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06         Ralf Baechle <ralf@linux-mips.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/dma-contiguous.h>
-
-#include <asm/cache.h>
-#include <asm/cpu-type.h>
-#include <asm/io.h>
-
-#include <dma-coherence.h>
-
-#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
-/* User defined DMA coherency from command line. */
-enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
-EXPORT_SYMBOL_GPL(coherentio);
-int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
-
-static int __init setcoherentio(char *str)
-{
-       coherentio = IO_COHERENCE_ENABLED;
-       pr_info("Hardware DMA cache coherency (command line)\n");
-       return 0;
-}
-early_param("coherentio", setcoherentio);
-
-static int __init setnocoherentio(char *str)
-{
-       coherentio = IO_COHERENCE_DISABLED;
-       pr_info("Software DMA cache coherency (command line)\n");
-       return 0;
-}
-early_param("nocoherentio", setnocoherentio);
-#endif
-
-static inline struct page *dma_addr_to_page(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return pfn_to_page(
-               plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition.  However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
-{
-       if (plat_device_is_coherent(dev))
-               return false;
-
-       switch (boot_cpu_type()) {
-       case CPU_R10000:
-       case CPU_R12000:
-       case CPU_BMIPS5000:
-               return true;
-
-       default:
-               /*
-                * Presence of MAARs suggests that the CPU supports
-                * speculatively prefetching data, and therefore requires
-                * the post-DMA flush/invalidate.
-                */
-               return cpu_has_maar;
-       }
-}
-
-static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-{
-       gfp_t dma_flag;
-
-#ifdef CONFIG_ISA
-       if (dev == NULL)
-               dma_flag = __GFP_DMA;
-       else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
-            if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
-                       dma_flag = __GFP_DMA;
-       else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
-                       dma_flag = __GFP_DMA32;
-       else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
-            if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
-               dma_flag = __GFP_DMA32;
-       else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-            if (dev == NULL ||
-                dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
-               dma_flag = __GFP_DMA;
-       else
-#endif
-               dma_flag = 0;
-
-       /* Don't invoke OOM killer */
-       gfp |= __GFP_NORETRY;
-
-       return gfp | dma_flag;
-}
-
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-       void *ret;
-       struct page *page = NULL;
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       gfp = massage_gfp_flags(dev, gfp);
-
-       if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
-               page = dma_alloc_from_contiguous(dev, count, get_order(size),
-                                                gfp);
-       if (!page)
-               page = alloc_pages(gfp, get_order(size));
-
-       if (!page)
-               return NULL;
-
-       ret = page_address(page);
-       memset(ret, 0, size);
-       *dma_handle = plat_map_dma_mem(dev, ret, size);
-       if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
-           !plat_device_is_coherent(dev)) {
-               dma_cache_wback_inv((unsigned long) ret, size);
-               ret = UNCAC_ADDR(ret);
-       }
-
-       return ret;
-}
-
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle, unsigned long attrs)
-{
-       unsigned long addr = (unsigned long) vaddr;
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct page *page = NULL;
-
-       plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
-
-       if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
-               addr = CAC_ADDR(addr);
-
-       page = virt_to_page((void *) addr);
-
-       if (!dma_release_from_contiguous(dev, page, count))
-               __free_pages(page, get_order(size));
-}
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-       void *cpu_addr, dma_addr_t dma_addr, size_t size,
-       unsigned long attrs)
-{
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long addr = (unsigned long)cpu_addr;
-       unsigned long off = vma->vm_pgoff;
-       unsigned long pfn;
-       int ret = -ENXIO;
-
-       if (!plat_device_is_coherent(dev))
-               addr = CAC_ADDR(addr);
-
-       pfn = page_to_pfn(virt_to_page((void *)addr));
-
-       if (attrs & DMA_ATTR_WRITE_COMBINE)
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       else
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (off < count && user_count <= (count - off)) {
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     pfn + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-       }
-
-       return ret;
-}
-
-static inline void __dma_sync_virtual(void *addr, size_t size,
-       enum dma_data_direction direction)
-{
-       switch (direction) {
-       case DMA_TO_DEVICE:
-               dma_cache_wback((unsigned long)addr, size);
-               break;
-
-       case DMA_FROM_DEVICE:
-               dma_cache_inv((unsigned long)addr, size);
-               break;
-
-       case DMA_BIDIRECTIONAL:
-               dma_cache_wback_inv((unsigned long)addr, size);
-               break;
-
-       default:
-               BUG();
-       }
-}
-
-/*
- * A single sg entry may refer to multiple physically contiguous
- * pages. But we still need to process highmem pages individually.
- * If highmem is not configured then the bulk of this loop gets
- * optimized out.
- */
-static inline void __dma_sync(struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       size_t left = size;
-
-       do {
-               size_t len = left;
-
-               if (PageHighMem(page)) {
-                       void *addr;
-
-                       if (offset + len > PAGE_SIZE) {
-                               if (offset >= PAGE_SIZE) {
-                                       page += offset >> PAGE_SHIFT;
-                                       offset &= ~PAGE_MASK;
-                               }
-                               len = PAGE_SIZE - offset;
-                       }
-
-                       addr = kmap_atomic(page);
-                       __dma_sync_virtual(addr + offset, len, direction);
-                       kunmap_atomic(addr);
-               } else
-                       __dma_sync_virtual(page_address(page) + offset,
-                                          size, direction);
-               offset = 0;
-               page++;
-               left -= len;
-       } while (left);
-}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction, unsigned long attrs)
-{
-       if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               __dma_sync(dma_addr_to_page(dev, dma_addr),
-                          dma_addr & ~PAGE_MASK, size, direction);
-       plat_post_dma_flush(dev);
-       plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-       int nents, enum dma_data_direction direction, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               if (!plat_device_is_coherent(dev) &&
-                   !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                       __dma_sync(sg_page(sg), sg->offset, sg->length,
-                                  direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-               sg->dma_length = sg->length;
-#endif
-               sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-                                 sg->offset;
-       }
-
-       return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction,
-       unsigned long attrs)
-{
-       if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               __dma_sync(page, offset, size, direction);
-
-       return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-       int nhwentries, enum dma_data_direction direction,
-       unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nhwentries, i) {
-               if (!plat_device_is_coherent(dev) &&
-                   !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-                   direction != DMA_TO_DEVICE)
-                       __dma_sync(sg_page(sg), sg->offset, sg->length,
-                                  direction);
-               plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-       }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-       dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-       if (cpu_needs_post_dma_flush(dev))
-               __dma_sync(dma_addr_to_page(dev, dma_handle),
-                          dma_handle & ~PAGE_MASK, size, direction);
-       plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-       dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-       if (!plat_device_is_coherent(dev))
-               __dma_sync(dma_addr_to_page(dev, dma_handle),
-                          dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-       struct scatterlist *sglist, int nelems,
-       enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       if (cpu_needs_post_dma_flush(dev)) {
-               for_each_sg(sglist, sg, nelems, i) {
-                       __dma_sync(sg_page(sg), sg->offset, sg->length,
-                                  direction);
-               }
-       }
-       plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-       struct scatterlist *sglist, int nelems,
-       enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       if (!plat_device_is_coherent(dev)) {
-               for_each_sg(sglist, sg, nelems, i) {
-                       __dma_sync(sg_page(sg), sg->offset, sg->length,
-                                  direction);
-               }
-       }
-}
-
-static int mips_dma_supported(struct device *dev, u64 mask)
-{
-       return plat_dma_supported(dev, mask);
-}
-
-static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                        enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       if (!plat_device_is_coherent(dev))
-               __dma_sync_virtual(vaddr, size, direction);
-}
-
-static const struct dma_map_ops mips_default_dma_map_ops = {
-       .alloc = mips_dma_alloc_coherent,
-       .free = mips_dma_free_coherent,
-       .mmap = mips_dma_mmap,
-       .map_page = mips_dma_map_page,
-       .unmap_page = mips_dma_unmap_page,
-       .map_sg = mips_dma_map_sg,
-       .unmap_sg = mips_dma_unmap_sg,
-       .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-       .sync_single_for_device = mips_dma_sync_single_for_device,
-       .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-       .sync_sg_for_device = mips_dma_sync_sg_for_device,
-       .dma_supported = mips_dma_supported,
-       .cache_sync = mips_dma_cache_sync,
-};
-
-const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-EXPORT_SYMBOL(mips_dma_map_ops);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644 (file)
index 0000000..2aca123
--- /dev/null
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06         Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+static inline int dev_is_coherent(struct device *dev)
+{
+       return dev->archdata.dma_coherent;
+}
+#else
+static inline int dev_is_coherent(struct device *dev)
+{
+       switch (coherentio) {
+       default:
+       case IO_COHERENCE_DEFAULT:
+               return hw_coherentio;
+       case IO_COHERENCE_ENABLED:
+               return 1;
+       case IO_COHERENCE_DISABLED:
+               return 0;
+       }
+}
+#endif /* CONFIG_DMA_PERDEV_COHERENT */
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent;  MIPS
+ * terminology calls memory areas with hardware maintained coherency coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this condition.
+ * However this function is only called on non-I/O-coherent systems and only the
+ * R10000 and R12000 are used in such systems: the SGI IP28 Indigo² and
+ * the SGI IP32, aka O2.
+ */
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
+{
+       if (dev_is_coherent(dev))
+               return false;
+
+       switch (boot_cpu_type()) {
+       case CPU_R10000:
+       case CPU_R12000:
+       case CPU_BMIPS5000:
+               return true;
+       default:
+               /*
+                * Presence of MAARs suggests that the CPU supports
+                * speculatively prefetching data, and therefore requires
+                * the post-DMA flush/invalidate.
+                */
+               return cpu_has_maar;
+       }
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+       void *ret;
+
+       ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       if (!ret)
+               return NULL;
+
+       if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+               dma_cache_wback_inv((unsigned long) ret, size);
+               ret = (void *)UNCAC_ADDR(ret);
+       }
+
+       return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+               cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
+       dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
+{
+       unsigned long user_count = vma_pages(vma);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long addr = (unsigned long)cpu_addr;
+       unsigned long off = vma->vm_pgoff;
+       unsigned long pfn;
+       int ret = -ENXIO;
+
+       if (!dev_is_coherent(dev))
+               addr = CAC_ADDR(addr);
+
+       pfn = page_to_pfn(virt_to_page((void *)addr));
+
+       if (attrs & DMA_ATTR_WRITE_COMBINE)
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < count && user_count <= (count - off)) {
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     pfn + off,
+                                     user_count << PAGE_SHIFT,
+                                     vma->vm_page_prot);
+       }
+
+       return ret;
+}
+
+static inline void dma_sync_virt(void *addr, size_t size,
+               enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               dma_cache_wback((unsigned long)addr, size);
+               break;
+
+       case DMA_FROM_DEVICE:
+               dma_cache_inv((unsigned long)addr, size);
+               break;
+
+       case DMA_BIDIRECTIONAL:
+               dma_cache_wback_inv((unsigned long)addr, size);
+               break;
+
+       default:
+               BUG();
+       }
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages.  But
+ * we still need to process highmem pages individually.  If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
+{
+       struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+       unsigned long offset = paddr & ~PAGE_MASK;
+       size_t left = size;
+
+       do {
+               size_t len = left;
+
+               if (PageHighMem(page)) {
+                       void *addr;
+
+                       if (offset + len > PAGE_SIZE) {
+                               if (offset >= PAGE_SIZE) {
+                                       page += offset >> PAGE_SHIFT;
+                                       offset &= ~PAGE_MASK;
+                               }
+                               len = PAGE_SIZE - offset;
+                       }
+
+                       addr = kmap_atomic(page);
+                       dma_sync_virt(addr + offset, len, dir);
+                       kunmap_atomic(addr);
+               } else
+                       dma_sync_virt(page_address(page) + offset, size, dir);
+               offset = 0;
+               page++;
+               left -= len;
+       } while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       if (!dev_is_coherent(dev))
+               dma_sync_phys(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       if (cpu_needs_post_dma_flush(dev))
+               dma_sync_phys(paddr, size, dir);
+}
+
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+               enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+
+       if (!dev_is_coherent(dev))
+               dma_sync_virt(vaddr, size, direction);
+}
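With the custom dma_map_ops gone, these hooks are driven by the generic
dma-direct code; roughly (a sketch of the common call path, assumed from the
generic infrastructure rather than shown in this patch):

    /* dma_map_single(dev, buf, len, DMA_TO_DEVICE)
     *   -> dma_direct_map_page()
     *      -> arch_sync_dma_for_device()   writeback/invalidate caches
     *
     * dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE)
     *   -> dma_direct_unmap_page()
     *      -> arch_sync_dma_for_cpu()      post-DMA flush, only when
     *                                      cpu_needs_post_dma_flush()
     */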
index 1986e09fb457c55ba16e3cd19f56f65e2737cb54..1601d90b087b8f933853ac87118aa09749f70f03 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
        return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+                              void *arg)
+{
+       unsigned long i;
+
+       for (i = 0; i < nr_pages; i++) {
+               if (pfn_valid(start_pfn + i) &&
+                   !PageReserved(pfn_to_page(start_pfn + i)))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+       unsigned long offset, pfn, last_pfn;
        struct vm_struct * area;
-       unsigned long offset;
        phys_addr_t last_addr;
        void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
                return (void __iomem *) CKSEG1ADDR(phys_addr);
 
        /*
-        * Don't allow anybody to remap normal RAM that we're using..
+        * Don't allow anybody to remap RAM that may be allocated by the page
+        * allocator, since that could lead to races & data clobbering.
         */
-       if (phys_addr < virt_to_phys(high_memory)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = __va(phys_addr);
-               t_end = t_addr + (size - 1);
-
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
+       pfn = PFN_DOWN(phys_addr);
+       last_pfn = PFN_DOWN(last_addr);
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                 __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
+               return NULL;
        }
 
        /*
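The net effect of the rewritten check, stated as behaviour (an interpretation
of the hunk above, assuming walk_system_ram_range() visits every System RAM
resource in the range):

    /* __ioremap(phys, size, flags):
     *   - any page in [phys, phys + size - 1] that is valid System RAM
     *     and not PageReserved() => WARN_ONCE + return NULL;
     *   - reserved carve-outs and pure MMIO ranges still map as before.
     */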
index d5d02993aa21ba99bdd66e2212e9484ed8d8892b..56e4f8bffd4cd96a0439abac031b48d4c57634f6 100644 (file)
@@ -623,21 +623,6 @@ struct dmadscr {
        u64 pad_b;
 } ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
 
-void sb1_dma_init(void)
-{
-       int i;
-
-       for (i = 0; i < DM_NUM_CHANNELS; i++) {
-               const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
-                                    V_DM_DSCR_BASE_RINGSZ(1);
-               void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
-               __raw_writeq(base_val, base_reg);
-               __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
-               __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
-       }
-}
-
 void clear_page(void *page)
 {
        u64 to_phys = CPHYSADDR((unsigned long)page);
index 79b9f2ad3ff514e0e48ab409d6465540a4fe097d..49312a14cd1702e148c22d71c68384c41ef717de 100644 (file)
@@ -1509,7 +1509,7 @@ static void setup_pw(void)
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        write_c0_pwctl(1 << 6 | psn);
 #endif
-       write_c0_kpgd(swapper_pg_dir);
+       write_c0_kpgd((long)swapper_pg_dir);
        kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
 }
 
index 9bb6baa45da35943506e371a9c5c6da569459d15..24e5b0d068995ed12d414702d1b020bb6db9f705 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/inst.h>
 #include <asm/elf.h>
 #include <asm/bugs.h>
-#define UASM_ISA       _UASM_ISA_MICROMIPS
 #include <asm/uasm.h>
 
 #define RS_MASK                0x1f
index 9fea6c6bbf49e3768e81ad3ffb8b8af2bbdef61f..60ceb93c71a0846345465f7735d88ea62e9dad58 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/inst.h>
 #include <asm/elf.h>
 #include <asm/bugs.h>
-#define UASM_ISA       _UASM_ISA_CLASSIC
 #include <asm/uasm.h>
 
 #define RS_MASK                0x1f
index 63940bdce698848b272e27f3f6c8b389e2d11b94..17c7fd471a27f4049ef03dc49f0176f51a5ddde5 100644 (file)
@@ -13,11 +13,9 @@ obj-y                                += malta-init.o
 obj-y                          += malta-int.o
 obj-y                          += malta-memory.o
 obj-y                          += malta-platform.o
-obj-y                          += malta-reset.o
 obj-y                          += malta-setup.o
 obj-y                          += malta-time.o
 
 obj-$(CONFIG_MIPS_CMP)         += malta-amon.o
-obj-$(CONFIG_MIPS_MALTA_PM)    += malta-pm.o
 
 CFLAGS_malta-dtshim.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-malta/malta-pm.c b/arch/mips/mti-malta/malta-pm.c
deleted file mode 100644 (file)
index efbd659..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 Imagination Technologies
- * Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-
-#include <asm/mach-malta/malta-pm.h>
-
-static struct pci_bus *pm_pci_bus;
-static resource_size_t pm_io_offset;
-
-int mips_pm_suspend(unsigned state)
-{
-       int spec_devid;
-       u16 sts;
-
-       if (!pm_pci_bus || !pm_io_offset)
-               return -ENODEV;
-
-       /* Ensure the power button status is clear */
-       while (1) {
-               sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS);
-               if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
-                       break;
-               outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS);
-       }
-
-       /* Enable entry to suspend */
-       outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
-            pm_io_offset + PIIX4_FUNC3IO_PMCNTRL);
-
-       /* If the special cycle occurs too soon this doesn't work... */
-       mdelay(10);
-
-       /*
-        * The PIIX4 will enter the suspend state only after seeing a special
-        * cycle with the correct magic data on the PCI bus. Generate that
-        * cycle now.
-        */
-       spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
-       pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0,
-                                  PIIX4_SUSPEND_MAGIC);
-
-       /* Give the system some time to power down */
-       mdelay(1000);
-
-       return 0;
-}
-
-static int __init malta_pm_setup(void)
-{
-       struct pci_dev *dev;
-       int res, io_region = PCI_BRIDGE_RESOURCES;
-
-       /* Find a reference to the PCI bus */
-       pm_pci_bus = pci_find_next_bus(NULL);
-       if (!pm_pci_bus) {
-               pr_warn("malta-pm: failed to find reference to PCI bus\n");
-               return -ENODEV;
-       }
-
-       /* Find the PIIX4 PM device */
-       dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
-                            PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
-                            PCI_ANY_ID, NULL);
-       if (!dev) {
-               pr_warn("malta-pm: failed to find PIIX4 PM\n");
-               return -ENODEV;
-       }
-
-       /* Request access to the PIIX4 PM IO registers */
-       res = pci_request_region(dev, io_region, "PIIX4 PM IO registers");
-       if (res) {
-               pr_warn("malta-pm: failed to request PM IO registers (%d)\n",
-                       res);
-               pci_dev_put(dev);
-               return -ENODEV;
-       }
-
-       /* Find the offset to the PIIX4 PM IO registers */
-       pm_io_offset = pci_resource_start(dev, io_region);
-
-       pci_dev_put(dev);
-       return 0;
-}
-
-late_initcall(malta_pm_setup);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
deleted file mode 100644 (file)
index dd6f62a..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/reboot.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-malta/malta-pm.h>
-
-static void mips_machine_power_off(void)
-{
-       mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
-
-       pr_info("Failed to power down, resetting\n");
-       machine_restart(NULL);
-}
-
-static int __init mips_reboot_setup(void)
-{
-       pm_power_off = mips_machine_power_off;
-
-       return 0;
-}
-arch_initcall(mips_reboot_setup);
index 7b63914d2e58e0efb17abf011b37c800428e8914..5d4c5e5fbd696ccccbb312f61b77c67a7fa0f328 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/screen_info.h>
 #include <linux/time.h>
 
+#include <asm/dma-coherence.h>
 #include <asm/fw/fw.h>
 #include <asm/mach-malta/malta-dtshim.h>
 #include <asm/mips-cps.h>
@@ -144,12 +145,6 @@ static int __init plat_enable_iocoherency(void)
 
 static void __init plat_setup_iocoherency(void)
 {
-#ifdef CONFIG_DMA_NONCOHERENT
-       /*
-        * Kernel has been configured with software coherency
-        * but we might choose to turn it off and use hardware
-        * coherency instead.
-        */
        if (plat_enable_iocoherency()) {
                if (coherentio == IO_COHERENCE_DISABLED)
                        pr_info("Hardware DMA cache coherency disabled\n");
@@ -161,10 +156,6 @@ static void __init plat_setup_iocoherency(void)
                else
                        pr_info("Software DMA cache coherency enabled\n");
        }
-#else
-       if (!plat_enable_iocoherency())
-               panic("Hardware DMA cache coherency not supported!");
-#endif
 }
 
 static void __init pci_clock_check(void)
@@ -226,29 +217,6 @@ static void __init bonito_quirks_setup(void)
                pr_info("Enabled Bonito debug mode\n");
        } else
                BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
-
-#ifdef CONFIG_DMA_COHERENT
-       if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
-               BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
-               pr_info("Enabled Bonito CPU coherency\n");
-
-               argptr = fw_getcmdline();
-               if (strstr(argptr, "iobcuncached")) {
-                       BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
-                       BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
-                               ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
-                                       BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       pr_info("Disabled Bonito IOBC coherency\n");
-               } else {
-                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
-                       BONITO_PCIMEMBASECFG |=
-                               (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
-                                       BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       pr_info("Enabled Bonito IOBC coherency\n");
-               }
-       } else
-               panic("Hardware DMA cache coherency not supported");
-#endif
 }
 
 void __init *plat_get_fdt(void)
@@ -279,11 +247,6 @@ void __init plat_mem_setup(void)
         */
        enable_dma(4);
 
-#ifdef CONFIG_DMA_COHERENT
-       if (mips_revision_sconid != MIPS_REVISION_SCON_BONITO)
-               panic("Hardware DMA cache coherency not supported");
-#endif
-
        if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
                bonito_quirks_setup();
 
index 769f93032c5331f3628e33b42c621db462e07717..8f5bc15975505f3e308585e8efa8ef59468bb87f 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/serial_reg.h>
 
 #include <asm/mipsregs.h>
+#include <asm/setup.h>
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/common.h>
 
index 856a6e6d296e978a6435707a781bc15df412824e..b5ba83f4c646b3b48598e5cce482b8c1a49f7d9a 100644 (file)
@@ -93,17 +93,3 @@ void __init device_tree_init(void)
 {
        unflatten_and_copy_device_tree();
 }
-
-static struct of_device_id __initdata xlp_ids[] = {
-       { .compatible = "simple-bus", },
-       {},
-};
-
-int __init xlp8xx_ds_publish_devices(void)
-{
-       if (!of_have_populated_dt())
-               return 0;
-       return of_platform_bus_probe(NULL, xlp_ids, NULL);
-}
-
-device_initcall(xlp8xx_ds_publish_devices);
index 02b665c022721e73f66ce0c586d0ef911e69e02a..a37b6f9f0ede1228abf925524cd392ad6ee7268e 100644 (file)
@@ -9,16 +9,15 @@
 #include <linux/kernel.h>
 #include <linux/virtio_console.h>
 #include <linux/kvm_para.h>
+#include <asm/setup.h>
 
 /*
  * Emit one character to the boot console.
  */
-int prom_putchar(char c)
+void prom_putchar(char c)
 {
        kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /*  port 0 */,
                (unsigned long)&c, 1 /* len == 1 */);
-
-       return 1;
 }
 
 #ifdef CONFIG_VIRTIO_CONSOLE
index 57e1463fcd02e072e79ede3b5a80c94909ed0dc2..a1d2c4ae0d1b56ca26069f9a929327297e034877 100644 (file)
@@ -167,7 +167,7 @@ oh_my_gawd:
 static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
                           int where, int size, u32 * value)
 {
-       if (bus->number > 0)
+       if (!pci_is_root_bus(bus))
                return pci_conf1_read_config(bus, devfn, where, size, value);
 
        return pci_conf0_read_config(bus, devfn, where, size, value);
@@ -310,7 +310,7 @@ oh_my_gawd:
 static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
        int where, int size, u32 value)
 {
-       if (bus->number > 0)
+       if (!pci_is_root_bus(bus))
                return pci_conf1_write_config(bus, devfn, where, size, value);
 
        return pci_conf0_write_config(bus, devfn, where, size, value);
index b4fa6413c4e52fbdf71c3ec62126577f1ff8f67a..c539d0d2b0cf76f8ea396a215a88c056f595ae11 100644 (file)
 #define AR2315_PCI_HOST_SLOT   3
 #define AR2315_PCI_HOST_DEVID  ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
 
+/*
+ * We need some arbitrary non-zero value to be programmed to the BAR1 register
+ * of PCI host controller to enable DMA. The same value should be used as the
+ * offset to calculate the physical address of DMA buffer for PCI devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
 /* ??? access BAR */
 #define AR2315_PCI_HOST_MBAR0          0x10000000
 /* RAM access BAR */
@@ -167,6 +174,23 @@ struct ar2315_pci_ctrl {
        struct resource io_res;
 };
 
+static inline dma_addr_t ar2315_dev_offset(struct device *dev)
+{
+       if (dev && dev_is_pci(dev))
+               return AR2315_PCI_HOST_SDRAM_BASEADDR;
+       return 0;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       return paddr + ar2315_dev_offset(dev);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr - ar2315_dev_offset(dev);
+}
+
 static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
 {
        struct pci_controller *hose = bus->sysdata;
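
The two helpers added above are inverses by construction: PCI devices see system RAM offset by the BAR1 value described in the comment, every other device sees it 1:1. A minimal round-trip check (the physical address is an arbitrary example):

    #include <assert.h>

    #define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000UL

    int main(void)
    {
            unsigned long paddr = 0x00400000UL;    /* arbitrary RAM address */

            /* __phys_to_dma() for a PCI device, then __dma_to_phys() back */
            unsigned long dma = paddr + AR2315_PCI_HOST_SDRAM_BASEADDR;
            assert(dma - AR2315_PCI_HOST_SDRAM_BASEADDR == paddr);
            return 0;
    }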
index 1e23c8d587bdef78db97ee553439d1f1eb3fe3ba..64b58cc48a912ede7142766d05366c065b36961c 100644 (file)
 #include <linux/irq.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
 
+#define AR724X_PCI_REG_APP             0x00
 #define AR724X_PCI_REG_RESET           0x18
 #define AR724X_PCI_REG_INT_STATUS      0x4c
 #define AR724X_PCI_REG_INT_MASK                0x50
 
+#define AR724X_PCI_APP_LTSSM_ENABLE    BIT(0)
+
 #define AR724X_PCI_RESET_LINK_UP       BIT(0)
 
 #define AR724X_PCI_INT_DEV0            BIT(14)
@@ -325,6 +329,37 @@ static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
                                         apc);
 }
 
+static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
+{
+       u32 ppl, app;
+       int wait = 0;
+
+       /* deassert PCIe host controller and PCIe PHY reset */
+       ath79_device_reset_clear(AR724X_RESET_PCIE);
+       ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);
+
+       /* remove the reset of the PCIE PLL */
+       ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+       ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
+       ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+       /* deassert bypass for the PCIE PLL */
+       ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+       ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
+       ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+       /* set PCIE Application Control to ready */
+       app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
+       app |= AR724X_PCI_APP_LTSSM_ENABLE;
+       __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);
+
+       /* wait up to 100ms for PHY link up */
+       do {
+               mdelay(10);
+               wait++;
+       } while (wait < 10 && !ar724x_pci_check_link(apc));
+}
+
 static int ar724x_pci_probe(struct platform_device *pdev)
 {
        struct ar724x_pci_controller *apc;
@@ -383,6 +418,13 @@ static int ar724x_pci_probe(struct platform_device *pdev)
        apc->pci_controller.io_resource = &apc->io_res;
        apc->pci_controller.mem_resource = &apc->mem_res;
 
+       /*
+        * Do the full PCIE Root Complex Initialization Sequence if the PCIe
+        * host controller is in reset.
+        */
+       if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
+               ar724x_pci_hw_init(apc);
+
        apc->link_up = ar724x_pci_check_link(apc);
        if (!apc->link_up)
                dev_warn(&pdev->dev, "PCIe link is down\n");
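
ar724x_pci_hw_init() ends with a bounded poll: at most ten 10 ms delays, matching the 100 ms budget named in the comment, with the link re-checked after each delay rather than before the first one. The same pattern as a runnable sketch, where sleep_10ms() and link_is_up() are hypothetical stand-ins for mdelay() and the PHY probe:

    #include <stdbool.h>
    #include <stdio.h>

    static int elapsed_ms;
    static void sleep_10ms(void) { elapsed_ms += 10; }
    static bool link_is_up(void) { return elapsed_ms >= 30; } /* up at 30 ms */

    /* At most ten 10 ms waits (a 100 ms budget), checking after each. */
    static bool wait_for_link(void)
    {
            for (int tries = 0; tries < 10; tries++) {
                    sleep_10ms();
                    if (link_is_up())
                            return true;
            }
            return false;
    }

    int main(void)
    {
            printf("link %s after %d ms\n",
                   wait_for_link() ? "up" : "down", elapsed_ms);
            return 0;
    }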
index 0f09eafa5e3abb068bd920676f6971ce1bf2578b..c94a66070a60d9339570b94ff51dddc0a49ce138 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/export.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
+#include <linux/dma-direct.h>
 #include <asm/sn/arch.h>
 #include <asm/pci/bridge.h>
 #include <asm/paccess.h>
@@ -182,6 +183,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+       return bc->baddr + paddr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr & ~(0xffUL << 56);
+}
+
 /*
  * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
  * to find the slot number in sense of the bridge device register.
@@ -200,17 +214,6 @@ static inline void pci_disable_swapping(struct pci_dev *dev)
        bridge->b_widget.w_tflush;      /* Flush */
 }
 
-static inline void pci_enable_swapping(struct pci_dev *dev)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
-       bridge_t *bridge = bc->base;
-       int slot = PCI_SLOT(dev->devfn);
-
-       /* Turn on byte swapping */
-       bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
-       bridge->b_widget.w_tflush;      /* Flush */
-}
-
 static void pci_fixup_ioc3(struct pci_dev *d)
 {
        pci_disable_swapping(d);
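
Read together, the two helpers suggest that the bridge base address (bc->baddr) occupies the top byte of the 64-bit DMA address, which is why masking off bits 56-63 recovers the physical address. A worked example with an assumed baddr value:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long baddr = 0xa1ULL << 56;   /* assumed example */
            unsigned long long paddr = 0x12345000ULL;
            unsigned long long dma = baddr + paddr;     /* __phys_to_dma() */

            /* __dma_to_phys(): strip bits 56-63 */
            printf("%#llx\n", dma & ~(0xffULL << 56));  /* 0x12345000 */
            return 0;
    }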
index 3e92a06fa77288c2705e8435e9352b6bf8c5e368..5017d5843c5ac4913aa254b2157bd66d86b91bcd 100644 (file)
@@ -21,8 +21,6 @@
 #include <asm/octeon/cvmx-pci-defs.h>
 #include <asm/octeon/pci-octeon.h>
 
-#include <dma-coherence.h>
-
 #define USE_OCTEON_INTERNAL_ARBITER
 
 /*
@@ -166,8 +164,6 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
                pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
        }
 
-       dev->dev.dma_ops = octeon_pci_dma_map_ops;
-
        return 0;
 }
 
index 9632436d74d7a74b3d584ab6e87a1fc7e55827cc..c2e94cf5ecdab7c7f3263bd65e76c30cf8eb32fc 100644 (file)
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
        phys_addr_t size = resource_size(rsrc);
 
        *start = fixup_bigphys_addr(rsrc->start, size);
-       *end = rsrc->start + size;
+       *end = rsrc->start + size - 1;
 }
index 87ba86bd869663b387ab4f86b03c129da815ce6b..d919a0d813a17a3639f8c6ade767e7dccc822146 100644 (file)
@@ -94,8 +94,6 @@ union cvmx_pcie_address {
 
 static int cvmx_pcie_rc_initialize(int pcie_port);
 
-#include <dma-coherence.h>
-
 /**
  * Return the Core virtual base address for PCIe IO access. IOs are
  * read/written as an offset from this address.
@@ -1239,14 +1237,14 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
        /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
                if (pcie_port) {
-                       union cvmx_ciu_qlm1 ciu_qlm;
+                       union cvmx_ciu_qlm ciu_qlm;
                        ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
                        ciu_qlm.s.txbypass = 1;
                        ciu_qlm.s.txdeemph = 5;
                        ciu_qlm.s.txmargin = 0x17;
                        cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
                } else {
-                       union cvmx_ciu_qlm0 ciu_qlm;
+                       union cvmx_ciu_qlm ciu_qlm;
                        ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
                        ciu_qlm.s.txbypass = 1;
                        ciu_qlm.s.txdeemph = 5;
index d7b783463fac1b3af66ff02a1ad16a0c375eec84..8ed4961b1271fc0b0d3292a87712ca5ef298b11e 100644 (file)
@@ -13,6 +13,7 @@
  */
 #include <asm/mach-pic32/pic32.h>
 #include <asm/fw/fw.h>
+#include <asm/setup.h>
 
 #include "pic32mzda.h"
 #include "early_pin.h"
@@ -157,7 +158,7 @@ void __init fw_init_early_console(char port)
        setup_early_console(port, baud);
 }
 
-int prom_putchar(char c)
+void prom_putchar(char c)
 {
        if (console_port >= 0) {
                while (__raw_readl(
@@ -166,6 +167,4 @@ int prom_putchar(char c)
 
                __raw_writel(c, uart_base + U_TXR(console_port));
        }
-
-       return 1;
 }
index 3c59ffe5f5f54116785a501ad39abaccd1ad11a5..ecd30ddfb3dbdffde9b4108e5aba60ead9693997 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/serial_reg.h>
 
 #include <asm/addrspace.h>
+#include <asm/setup.h>
 
 #ifdef CONFIG_SOC_RT288X
 #define EARLY_UART_BASE                0x300c00
@@ -68,7 +69,7 @@ static void find_uart_base(void)
        }
 }
 
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
 {
        if (!init_complete) {
                find_uart_base();
@@ -76,13 +77,13 @@ void prom_putchar(unsigned char ch)
        }
 
        if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
-               uart_w32(ch, UART_TX);
+               uart_w32((unsigned char)ch, UART_TX);
                while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
                        ;
        } else {
                while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
                        ;
-               uart_w32(ch, UART_REG_TX);
+               uart_w32((unsigned char)ch, UART_REG_TX);
                while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
                        ;
        }
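
Both branches above implement the same polled transmit: spin on the line status register until the transmit holding register is empty, then write the byte; they differ only in whether the extra wait comes before or after the write. A runnable sketch of the pattern, modelling the UART registers as a plain array so it runs anywhere (on real hardware these would be volatile MMIO accesses):

    #include <stdint.h>
    #include <stdio.h>

    #define UART_TX        0
    #define UART_LSR       1
    #define UART_LSR_THRE  0x20     /* transmit holding register empty */

    static uint32_t regs[2] = { 0, UART_LSR_THRE };  /* fake UART, TX ready */

    static void putchar_polled(char ch)
    {
            while (!(regs[UART_LSR] & UART_LSR_THRE))
                    ;               /* spin until the FIFO has room */
            regs[UART_TX] = (unsigned char)ch;
            putchar((int)regs[UART_TX]);    /* stand-in for the hardware TX */
    }

    int main(void)
    {
            putchar_polled('A');
            putchar('\n');
            return 0;
    }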
index 45fdfbcbd4c612149353db396eb648860df0a5ef..6bdb48d412762c9ab1816b0fd6ec71426efd8ed8 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <asm/page.h>
+#include <asm/setup.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/sn0/hub.h>
 #include <asm/sn/klconfig.h>
index 60f0227425e709ba82a9a882158a1b2e5ecb4e49..4745cd94df11622093f440985c32b3373ade30a8 100644 (file)
@@ -4,4 +4,4 @@
 #
 
 obj-y  += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
-          crime.o ip32-memory.o
+          crime.o ip32-memory.o ip32-dma.o
diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c
new file mode 100644 (file)
index 0000000..fa7b17c
--- /dev/null
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/dma-direct.h>
+#include <asm/ip32/crime.h>
+
+/*
+ * Few notes.
+ * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
+ * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
+ *    native-endian)
+ * 3. All other devices see memory as one big chunk at 0x40000000
+ * 4. Non-PCI devices will pass NULL as struct device*
+ *
+ * Thus we translate differently, depending on device.
+ */
+
+#define RAM_OFFSET_MASK 0x3fffffffUL
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
+
+       if (!dev)
+               dma_addr += CRIME_HI_MEM_BASE;
+       return dma_addr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+       phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
+
+       if (dma_addr >= 256*1024*1024)
+               paddr += CRIME_HI_MEM_BASE;
+       return paddr;
+}
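
Following the numbered notes above, the translation makes both CPU chunks appear contiguous: one range starting at 0 for PCI, one range starting at 0x40000000 for everything else. A worked example of __phys_to_dma() for the first page of the second RAM chunk, assuming CRIME_HI_MEM_BASE is 0x40000000 as notes 1 and 3 suggest:

    #include <stdio.h>

    #define RAM_OFFSET_MASK   0x3fffffffUL
    #define CRIME_HI_MEM_BASE 0x40000000UL  /* assumed value, per the notes */

    static unsigned long phys_to_dma(int is_pci, unsigned long paddr)
    {
            unsigned long dma = paddr & RAM_OFFSET_MASK;

            if (!is_pci)    /* non-PCI devices see RAM at 0x40000000 */
                    dma += CRIME_HI_MEM_BASE;
            return dma;
    }

    int main(void)
    {
            /* CPU phys 0x40000000 + 256M: start of the second RAM chunk */
            unsigned long paddr = 0x50000000UL;

            /* 0x10000000: lands right after the low 256 MB, one big chunk */
            printf("pci:     %#lx\n", phys_to_dma(1, paddr));
            /* 0x50000000: identity, part of the big chunk at 0x40000000 */
            printf("non-pci: %#lx\n", phys_to_dma(0, paddr));
            return 0;
    }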
index f4dbce25bc6a5791da300045ca97038443dc49da..7ec278d720966c533a31e9945313c6d07853720d 100644 (file)
@@ -70,7 +70,6 @@ config SIBYTE_BCM1x55
 
 config SIBYTE_SB1xxx_SOC
        bool
-       select DMA_COHERENT
        select IRQ_MIPS_CPU
        select SWAP_IO_SPACE
        select SYS_SUPPORTS_32BIT_KERNEL
index 115399202eab18a252f8a304cfcf791836c3e95f..092fb2a6ec4a04415bb6ed72db53552ede93da35 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/sibyte/board.h>
 #include <asm/smp-ops.h>
 
index 1791a44ee570a05987d95015cad5854c88ef641b..dde4dc859f79c05a4a3417e43de356d6580361ad 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/reboot.h>
 #include <asm/r4kcache.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/txx9/generic.h>
 #include <asm/txx9/pci.h>
 #include <asm/txx9tmr.h>
index ce196046ac3e43e017357d1ff9106fa00a5e703a..34605ca214984c7257507fa2fb5925eb48bd9f92 100644 (file)
@@ -7,7 +7,13 @@ ccflags-vdso := \
        $(filter -I%,$(KBUILD_CFLAGS)) \
        $(filter -E%,$(KBUILD_CFLAGS)) \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
-       $(filter -march=%,$(KBUILD_CFLAGS))
+       $(filter -march=%,$(KBUILD_CFLAGS)) \
+       -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
 cflags-vdso := $(ccflags-vdso) \
        $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
        -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
@@ -38,6 +44,7 @@ endif
 # VDSO linker flags.
 VDSO_LDFLAGS := \
        -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
+       $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
        -nostdlib -shared \
        $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
        $(call cc-ldoption, -Wl$(comma)--build-id)
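
Two VDSO build fixes ride together here: when building with clang, the --target= triple from KBUILD_CFLAGS has to be forwarded explicitly or the VDSO objects are compiled for the host machine, and the endianness options matched by the -E% filter (-EL/-EB) are now also passed through to the link step via -Wl, so the shared object is linked with the same byte order it was compiled for. The new -D__VDSO__ define presumably lets shared headers tell VDSO compilation apart from regular kernel builds.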
index 94334727059a66ac21b7f91e4ceadaca828bf0ec..611b06f01a3c7eb830795e764003b2f66b4d3352 100644 (file)
@@ -15,8 +15,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
        ELF(Shdr) *shdr;
        char *shstrtab, *name;
        uint16_t sh_count, sh_entsize, i;
-       unsigned int local_gotno, symtabno, gotsym;
-       ELF(Dyn) *dyn = NULL;
 
        shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
        sh_count = swap_uint16(ehdr->e_shnum);
@@ -41,9 +39,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
                                "%s: '%s' contains relocation sections\n",
                                program_name, path);
                        return false;
-               case SHT_DYNAMIC:
-                       dyn = vdso + FUNC(swap_uint)(shdr->sh_offset);
-                       break;
                }
 
                /* Check for existing sections. */
@@ -61,52 +56,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
                }
        }
 
-       /*
-        * Ensure the GOT has no entries other than the standard 2, for the same
-        * reason we check that there's no relocation sections above.
-        * The standard two entries are:
-        * - Lazy resolver
-        * - Module pointer
-        */
-       if (dyn) {
-               local_gotno = symtabno = gotsym = 0;
-
-               while (FUNC(swap_uint)(dyn->d_tag) != DT_NULL) {
-                       switch (FUNC(swap_uint)(dyn->d_tag)) {
-                       /*
-                        * This member holds the number of local GOT entries.
-                        */
-                       case DT_MIPS_LOCAL_GOTNO:
-                               local_gotno = FUNC(swap_uint)(dyn->d_un.d_val);
-                               break;
-                       /*
-                        * This member holds the number of entries in the
-                        * .dynsym section.
-                        */
-                       case DT_MIPS_SYMTABNO:
-                               symtabno = FUNC(swap_uint)(dyn->d_un.d_val);
-                               break;
-                       /*
-                        * This member holds the index of the first dynamic
-                        * symbol table entry that corresponds to an entry in
-                        * the GOT.
-                        */
-                       case DT_MIPS_GOTSYM:
-                               gotsym = FUNC(swap_uint)(dyn->d_un.d_val);
-                               break;
-                       }
-
-                       dyn++;
-               }
-
-               if (local_gotno > 2 || symtabno - gotsym) {
-                       fprintf(stderr,
-                               "%s: '%s' contains unexpected GOT entries\n",
-                               program_name, path);
-                       return false;
-               }
-       }
-
        return true;
 }
 
index 39a0db3e2b346f4d9164eca8935a2e4d39663026..16e684b598751642e10007695c8fccdd7934e63c 100644 (file)
@@ -17,6 +17,7 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
@@ -46,7 +47,7 @@ static void __iomem *pmu_base;
 #define pmu_read(offset)               readw(pmu_base + (offset))
 #define pmu_write(offset, value)       writew((value), pmu_base + (offset))
 
-static void vr41xx_cpu_wait(void)
+static void __cpuidle vr41xx_cpu_wait(void)
 {
        local_irq_disable();
        if (!need_resched())
index 6aed974276d8f2cf9f337e73b04cd8479e99bab8..34f7222c5efe0405af96d09f1c691405109ab810 100644 (file)
@@ -12,17 +12,17 @@ config NDS32
        select CLONE_BACKWARDS
        select COMMON_CLK
        select DMA_NONCOHERENT_OPS
-       select GENERIC_ASHLDI3
-       select GENERIC_ASHRDI3
-       select GENERIC_LSHRDI3
-       select GENERIC_CMPDI2
-       select GENERIC_MULDI3
-       select GENERIC_UCMPDI2
        select GENERIC_ATOMIC64
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_SHOW
+       select GENERIC_LIB_ASHLDI3
+       select GENERIC_LIB_ASHRDI3
+       select GENERIC_LIB_CMPDI2
+       select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_MULDI3
+       select GENERIC_LIB_UCMPDI2
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
index 513bb2e9baf9fa84615a8d03ce9ec57fb7f55849..031c676821ff8797a879c6af56aeffd9ea4c4ed3 100644 (file)
@@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS   += $(call cc-option, -EL)
 KBUILD_AFLAGS   += $(call cc-option, -EL)
 LDFLAGS         += $(call cc-option, -EL)
+CHECKFLAGS      += -D__NDS32_EL__
 else
 KBUILD_CFLAGS   += $(call cc-option, -EB)
 KBUILD_AFLAGS   += $(call cc-option, -EB)
 LDFLAGS         += $(call cc-option, -EB)
+CHECKFLAGS      += -D__NDS32_EB__
 endif
 
 boot := arch/nds32/boot
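
CHECKFLAGS is what kbuild hands to sparse, which does not inherit the compiler's built-in target macros, so the endianness has to be spelled out as -D__NDS32_EL__/-D__NDS32_EB__ for endian-conditional code to be checked correctly.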
index 10b48f0d8e857fe9ae3ec6d35bd4a6004c2b4aba..8b26198d51bb78b60a28748248e1e50f89be52ab 100644 (file)
@@ -8,6 +8,8 @@
 
 #define PG_dcache_dirty PG_arch_1
 
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
 void flush_kernel_dcache_page(struct page *page);
 void flush_kernel_vmap_range(void *addr, int size);
 void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping)   xa_lock_irq(&(mapping)->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
 #include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+                            unsigned long addr, int len);
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
index eab5e84bd9919eaa7503eef129c3cd8072b9a61c..cb6cb91cfdf81622dc170286d83803e2d4e7ad73 100644 (file)
@@ -16,7 +16,7 @@
        "       .popsection\n"                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "4:     move    %0, " err_reg "\n"                      \
-       "       j       3b\n"                                   \
+       "       b       3b\n"                                   \
        "       .popsection"
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
index 2f5b2ccebe47166a9863468960f145e9601a9bf4..63a1a5ef5219f47bcd9797e543298056294da22f 100644 (file)
@@ -278,7 +278,8 @@ static void __init setup_memory(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       early_init_devtree( __dtb_start);
+       early_init_devtree(__atags_pointer ? \
+               phys_to_virt(__atags_pointer) : __dtb_start);
 
        setup_cpuinfo();
 
index ce8fd34497bf045beafa845d2df1500b00281a4c..254703653b6f5db430af4f4ae01cca63aa67049a 100644 (file)
 
 extern struct cache_info L1_cache_info[2];
 
-#ifndef CONFIG_CPU_CACHE_ALIASING
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long line_size, flags;
+       line_size = L1_cache_info[DCACHE].line_size;
+       start = start & ~(line_size - 1);
+       end = (end + line_size - 1) & ~(line_size - 1);
+       local_irq_save(flags);
+       cpu_cache_wbinval_range(start, end, 1);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+       unsigned long flags;
+       unsigned long kaddr;
+       local_irq_save(flags);
+       kaddr = (unsigned long)kmap_atomic(page);
+       cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+       kunmap_atomic((void *)kaddr);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+                            unsigned long addr, int len)
+{
+       unsigned long kaddr;
+       kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+       flush_icache_range(kaddr, kaddr + len);
+       kunmap_atomic((void *)kaddr);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t * pte)
 {
@@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
-
-               if (!PageHighMem(page)) {
-                       cpu_cache_wbinval_page((unsigned long)
-                                              page_address(page),
-                                              vma->vm_flags & VM_EXEC);
-               } else {
-                       unsigned long kaddr = (unsigned long)kmap_atomic(page);
-                       cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
-                       kunmap_atomic((void *)kaddr);
-               }
+               unsigned long kaddr;
+               local_irq_save(flags);
+               kaddr = (unsigned long)kmap_atomic(page);
+               cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+               kunmap_atomic((void *)kaddr);
+               local_irq_restore(flags);
        }
 }
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
 extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
 
 static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-       unsigned long line_size, flags;
-       line_size = L1_cache_info[DCACHE].line_size;
-       start = start & ~(line_size - 1);
-       end = (end + line_size - 1) & ~(line_size - 1);
-       local_irq_save(flags);
-       cpu_cache_wbinval_range(start, end, 1);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-       unsigned long flags;
-       local_irq_save(flags);
-       cpu_cache_wbinval_page((unsigned long)page_address(page),
-                              vma->vm_flags & VM_EXEC);
-       local_irq_restore(flags);
-}
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-                     pte_t * pte)
-{
-       struct page *page;
-       unsigned long flags;
-       unsigned long pfn = pte_pfn(*pte);
-
-       if (!pfn_valid(pfn))
-               return;
-
-       if (vma->vm_mm == current->active_mm) {
-               local_irq_save(flags);
-               __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
-               __nds32__tlbop_rwr(*pte);
-               __nds32__isb();
-               local_irq_restore(flags);
-       }
-
-       page = pfn_to_page(pfn);
-       if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
-           (vma->vm_flags & VM_EXEC)) {
-               local_irq_save(flags);
-               cpu_dcache_wbinval_page((unsigned long)page_address(page));
-               local_irq_restore(flags);
-       }
-}
 #endif
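
Net effect of the reshuffle: flush_icache_range() and flush_icache_page() are now built for both cache configurations instead of only the aliasing one, the page flushes go through kmap_atomic()/kunmap_atomic() unconditionally (dropping the PageHighMem() special case while staying correct for highmem pages that have no permanent kernel mapping), and the non-aliasing build gets a real flush_icache_user_range() that flushes only the len bytes actually written, instead of the asm-generic stub.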
index 9ecad05bfc7343ade8e5164d8fb0194fb494671f..dfb6a79ba7ff7ffd99a23bb2e88b668e1c39d118 100644 (file)
@@ -27,7 +27,6 @@ config OPENRISC
        select GENERIC_STRNLEN_USER
        select GENERIC_SMP_IDLE_THREAD
        select MODULES_USE_ELF_RELA
-       select MULTI_IRQ_HANDLER
        select HAVE_DEBUG_STACKOVERFLOW
        select OR1K_PIC
        select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
        select ARCH_USE_QUEUED_RWLOCKS
        select OMPIC if SMP
        select ARCH_WANT_FRAME_POINTERS
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config CPU_BIG_ENDIAN
        def_bool y
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
        def_bool  y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index 146e1660f00ebc59a43e3ce0b72be6dc62a4a58f..b589fac39b923e7db69ada75f4dfc8e9d9c4aa7b 100644 (file)
@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
  *
  * This is often used through atomic_inc_not_zero()
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int old, tmp;
 
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
        return old;
 }
-#define __atomic_add_unless    __atomic_add_unless
+#define atomic_fetch_add_unless        atomic_fetch_add_unless
 
 #include <asm-generic/atomic.h>
 
index d29f7db53906b77d19dd920ad98bb37e41b45e4a..f9cd43a39d72612decb2c7cfecbfabe38daeba12 100644 (file)
@@ -16,8 +16,9 @@
 #ifndef __ASM_OPENRISC_CMPXCHG_H
 #define __ASM_OPENRISC_CMPXCHG_H
 
+#include  <linux/bits.h>
+#include  <linux/compiler.h>
 #include  <linux/types.h>
-#include  <linux/bitops.h>
 
 #define __HAVE_ARCH_CMPXCHG 1
 
index d9eee0a2b7b4a866c760b5dbcb0a66a7d82c4d3f..eb612b1865d24dd6431063f3bb7d9bc422903040 100644 (file)
@@ -24,6 +24,4 @@
 
 #define NO_IRQ         (-1)
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 #endif /* __ASM_OPENRISC_IRQ_H__ */
index 3e1a46615120a566adbfff65b5aab8e860bf3809..8999b922651210f6c20c83e7aa72b3bccf6c3d58 100644 (file)
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr) \
+do {                                   \
+       pgtable_page_dtor(pte);         \
+       tlb_remove_page((tlb), (pte));  \
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
index 690d55272ba688a2adc88bca00e66cc61903c711..0c826ad6e994cce359474229acf08ff0d0330b78 100644 (file)
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set be EXCEPTION_HANDLE */   // effective address of fault
 
-       /*
-        * __PHX__: TODO
-        *
-        * all this can be written much simpler. look at
-        * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-        */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        l.lwz   r6,PT_PC(r3)               // address of an offending insn
        l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-       l.lwz   r6,PT_SR(r3)               // SR
+       l.mfspr r6,r0,SPR_SR               // SR
        l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
        l.sfne  r6,r0                      // exception happened in delay slot
        l.bnf   7f
index fb02b2a1d6f2d875372b125cf837feb119d0164e..9fc6b60140f007bea1442f60727a22aee24776c9 100644 (file)
  *      r4  - EEAR     exception EA
  *      r10 - current  pointing to current_thread_info struct
  *      r12 - syscall  0, since we didn't come from syscall
- *      r13 - temp     it actually contains new SR, not needed anymore
- *      r31 - handler  address of the handler we'll jump to
+ *      r30 - handler  address of the handler we'll jump to
  *
  *      handler has to save remaining registers to the exception
  *      ksp frame *before* tainting them!
        /* r1 is KSP, r30 is __pa(KSP) */                       ;\
        tophys  (r30,r1)                                        ;\
        l.sw    PT_GPR12(r30),r12                               ;\
+       /* r4 use for tmp before EA */                          ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
        l.sw    PT_PC(r30),r12                                  ;\
        l.mfspr r12,r0,SPR_ESR_BASE                             ;\
        /* r12 == 1 if we come from syscall */                  ;\
        CLEAR_GPR(r12)                                          ;\
        /* ----- turn on MMU ----- */                           ;\
-       l.ori   r30,r0,(EXCEPTION_SR)                           ;\
+       /* Carry DSX into exception SR */                       ;\
+       l.mfspr r30,r0,SPR_SR                                   ;\
+       l.andi  r30,r30,SPR_SR_DSX                              ;\
+       l.ori   r30,r30,(EXCEPTION_SR)                          ;\
        l.mtspr r0,r30,SPR_ESR_BASE                             ;\
        /* r30: EA address of handler */                        ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
index 35e478a93116802991b5242c1c497097399a222e..5f9445effaf8d9d819dcca1060f459db93e3e1e0 100644 (file)
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
        irqchip_init();
 }
 
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       handle_arch_irq = handle_irq;
-}
-
 void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        handle_arch_irq(regs);
index fac246e6f37a278e4cd7c001c2cd53a8df88dc4e..d8981cbb852a5f1fc1ea80667df3ed451579d13c 100644 (file)
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
                return 0;
        }
 #else
-       return regs->sr & SPR_SR_DSX;
+       return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
index c480770fabcd6287571dacb9d40ccc224f8e13b1..e21751fb24aa72dd4db64b86699094e5dd087f5f 100644 (file)
@@ -11,7 +11,6 @@ config PARISC
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_HAS_UBSAN_SANITIZE_ALL
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_SUPPORTS_MEMORY_FAILURE
        select RTC_CLASS
        select RTC_DRV_GENERIC
@@ -46,6 +45,7 @@ config PARISC
        select HAVE_ARCH_HASH
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_REGS_AND_STACK_ACCESS_API
        select GENERIC_SCHED_CLOCK
        select HAVE_UNSTABLE_SCHED_CLOCK if SMP
        select GENERIC_CLOCKEVENTS
@@ -188,6 +188,10 @@ config PA20
 config PA11
        def_bool y
        depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+       select ARCH_HAS_SYNC_DMA_FOR_CPU
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select DMA_NONCOHERENT_OPS
+       select DMA_NONCOHERENT_CACHE_SYNC
 
 config PREFETCH
        def_bool y
@@ -195,7 +199,7 @@ config PREFETCH
 
 config MLONGCALLS
        bool "Enable the -mlong-calls compiler option for big kernels"
-       def_bool y if (!MODULES)
+       default y
        depends on PA8X00
        help
          If you configure the kernel to include many drivers built-in instead
@@ -244,11 +248,11 @@ config PARISC_PAGE_SIZE_4KB
 
 config PARISC_PAGE_SIZE_16KB
        bool "16KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 config PARISC_PAGE_SIZE_64KB
        bool "64KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 endchoice
 
@@ -347,7 +351,7 @@ config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
        depends on SMP
-       default "32"
+       default "4"
 
 endmenu
 
index 714284ea6cc214f1011c6e0593f5ad2b0c962ddc..5ce030266e7d03bbfd7da5885471b1a874eefcd7 100644 (file)
@@ -65,10 +65,6 @@ endif
 # kernel.
 cflags-y       += -mdisable-fpregs
 
-# Without this, "ld -r" results in .text sections that are too big
-# (> 0x40000) for branches to reach stubs.
-cflags-y       += -ffunction-sections
-
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
 ifdef CONFIG_MLONGCALLS
index 60e6f07b7e326bc4eb306105dcae477d5aa01f0e..e9c6385ef0d16235dd24ff256a9ea9181745c355 100644 (file)
@@ -36,6 +36,7 @@
 #define RP_OFFSET      16
 #define FRAME_SIZE     128
 #define CALLEE_REG_FRAME_SIZE  144
+#define REG_SZ         8
 #define ASM_ULONG_INSN .dword
 #else  /* CONFIG_64BIT */
 #define LDREG  ldw
@@ -50,6 +51,7 @@
 #define RP_OFFSET      20
 #define FRAME_SIZE     64
 #define CALLEE_REG_FRAME_SIZE  128
+#define REG_SZ         4
 #define ASM_ULONG_INSN .word
 #endif
 
index 88bae6676c9b6ef3823f6a8590882d43b0d83b22..118953d417634369047dc71bb2db3a400b7719fd 100644 (file)
@@ -77,30 +77,6 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
 #define ATOMIC_OP(op, c_op)                                            \
 static __inline__ void atomic_##op(int i, atomic_t *v)                 \
 {                                                                      \
@@ -160,28 +136,6 @@ ATOMIC_OPS(xor, ^=)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_inc(v)  (atomic_add(   1,(v)))
-#define atomic_dec(v)  (atomic_add(  -1,(v)))
-
-#define atomic_inc_return(v)   (atomic_add_return(   1,(v)))
-#define atomic_dec_return(v)   (atomic_add_return(  -1,(v)))
-
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
-#define atomic_sub_and_test(i,v)       (atomic_sub_return((i),(v)) == 0)
-
 #define ATOMIC_INIT(i) { (i) }
 
 #ifdef CONFIG_64BIT
@@ -264,72 +218,11 @@ atomic64_read(const atomic64_t *v)
        return READ_ONCE((v)->counter);
 }
 
-#define atomic64_inc(v)                (atomic64_add(   1,(v)))
-#define atomic64_dec(v)                (atomic64_add(  -1,(v)))
-
-#define atomic64_inc_return(v)         (atomic64_add_return(   1,(v)))
-#define atomic64_dec_return(v)         (atomic64_add_return(  -1,(v)))
-
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-
-#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v)       (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i),(v)) == 0)
-
 /* exported interface */
 #define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long c, old, dec;
-       c = atomic64_read(v);
-       for (;;) {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-               old = atomic64_cmpxchg((v), c, dec);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return dec;
-}
-
 #endif /* !CONFIG_64BIT */
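
The deleted helpers here, like the __atomic_add_unless() renames elsewhere in this merge, match the pattern of architectures dropping open-coded cmpxchg loops in favour of generic fallbacks built on atomic_fetch_add_unless(). The loop those helpers implemented, sketched with C11 atomics rather than the kernel's primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Add a to *v unless *v == u; return the old value. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u) {
                    /* on failure, c is reloaded with the current value */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            break;
            }
            return c;
    }

    int main(void)
    {
            atomic_int v = 1;

            printf("%d\n", fetch_add_unless(&v, 1, 0)); /* prints 1, v now 2 */
            printf("%d\n", fetch_add_unless(&v, 1, 2)); /* prints 2, v unchanged */
            return 0;
    }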
 
 
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..dbaaca8
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()           do { synchronize_caches(); } while (0)
+#define rmb()          mb()
+#define wmb()          mb()
+#define dma_rmb()      mb()
+#define dma_wmb()      mb()
+#else
+#define mb()           barrier()
+#define rmb()          barrier()
+#define wmb()          barrier()
+#define dma_rmb()      barrier()
+#define dma_wmb()      barrier()
+#endif
+
+#define __smp_mb()     mb()
+#define __smp_rmb()    mb()
+#define __smp_wmb()    mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
index 01e1fc057c83c1cace261b277727458c73f210d0..44a9f97194aadd46c49027630312ca66d9f476ef 100644 (file)
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
-#ifdef CONFIG_PA11
-extern const struct dma_map_ops pcxl_dma_ops;
-extern const struct dma_map_ops pcx_dma_ops;
-#endif
-
 extern const struct dma_map_ops *hppa_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
index 9a69bf6fc4b6938260162b8ba7435301e5ccf2db..49f6f3d772cc643b7cced383bf17ec73a6b1fd6b 100644 (file)
@@ -18,9 +18,9 @@
 #ifdef __ASSEMBLY__
 
 #define ENTRY(name) \
-       .export name !\
-       ALIGN !\
-name:
+       ALIGN   !\
+name:          ASM_NL\
+       .export name
 
 #ifdef CONFIG_64BIT
 #define ENDPROC(name) \
@@ -31,13 +31,18 @@ name:
        END(name)
 #endif
 
-#define ENTRY_CFI(name) \
+#define ENTRY_CFI(name, ...) \
        ENTRY(name)     ASM_NL\
+       .proc           ASM_NL\
+       .callinfo __VA_ARGS__   ASM_NL\
+       .entry          ASM_NL\
        CFI_STARTPROC
 
 #define ENDPROC_CFI(name) \
-       ENDPROC(name)   ASM_NL\
-       CFI_ENDPROC
+       CFI_ENDPROC     ASM_NL\
+       .exit           ASM_NL\
+       .procend        ASM_NL\
+       ENDPROC(name)
 
 #endif /* __ASSEMBLY__ */
 
index 46da07670c2bea266084945fd9ff4f5a42b25c58..2a27b275ab092cc60b3d003250aaaf647aa9c916 100644 (file)
@@ -25,4 +25,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
        return regs->gr[20];
 }
 
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                               unsigned long val)
+{
+        regs->iaoq[0] = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
+
 #endif
index eeb5c88586631e8935b96e0edfe410bbbc2ecffc..715c96ba2ec81c2907ead07ffd21fbf79a0fb0cb 100644 (file)
@@ -21,14 +21,6 @@ typedef struct {
        unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
-#ifndef __KERNEL__
-struct sigaction {
-       __sighandler_t sa_handler;
-       unsigned long sa_flags;
-       sigset_t sa_mask;               /* mask last for extensibility */
-};
-#endif
-
 #include <asm/sigcontext.h>
 
 #endif /* !__ASSEMBLY */
index 6f84b6acc86ed1e291b70818b7715400e0f7b7e6..8a63515f03bfe3931930d094a479060815832fe6 100644 (file)
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
        volatile unsigned int *a;
 
-       mb();
        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                local_irq_disable();
                        } else
                                cpu_relax();
-       mb();
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
        volatile unsigned int *a;
-       mb();
+
        a = __ldcw_align(x);
-       *a = 1;
        mb();
+       *a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
        volatile unsigned int *a;
        int ret;
 
-       mb();
        a = __ldcw_align(x);
         ret = __ldcw(a) != 0;
-       mb();
 
        return ret;
 }
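
The unlock change is the interesting one: the barrier now executes before the store that marks the lock free, giving release semantics, since every store made inside the critical section must be visible to other CPUs before they can observe the lock word as 1. The mb() calls around the acquire side go away entirely, presumably because ldcw itself provides the needed ordering there. The same ordering sketched with C11 atomics, not the kernel's ldcw implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint lock_word = 1;       /* 1 == free, as with ldcw locks */
    static int shared_data;

    static int trylock(void)
    {
            /* ldcw-style acquire: atomically read the word and zero it */
            return atomic_exchange_explicit(&lock_word, 0,
                                            memory_order_acquire) != 0;
    }

    static void unlock(void)
    {
            /* release: critical-section stores become visible before the
             * lock reads as free -- what moving mb() before "*a = 1" does */
            atomic_store_explicit(&lock_word, 1, memory_order_release);
    }

    int main(void)
    {
            if (trylock()) {
                    shared_data++;
                    unlock();
            }
            printf("%d\n", shared_data);
            return 0;
    }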
index c73a3ee202267b281217014c549272212f387b78..f133b7efbebbf882ac0e26d3edf02c0924175930 100644 (file)
@@ -4,6 +4,9 @@
 
 #include <linux/list.h>
 
+/* Max number of levels to backtrace */
+#define MAX_UNWIND_ENTRIES     30
+
 /* From ABI specifications */
 struct unwind_table_entry {
        unsigned int region_start;
index fc0df353ff0da5f8bd6deb8d69f6aa55e631dc7a..87245c584784ec1f0f877fbe0be54ee136df5456 100644 (file)
 #define        ELOOP           249     /* Too many symbolic links encountered */
 #define        ENOSYS          251     /* Function not implemented */
 
-#define ENOTSUP                252     /* Function not implemented (POSIX.4 / HPUX) */
 #define ECANCELLED     253     /* aio request was canceled before complete (POSIX.4 / HPUX) */
 #define ECANCELED      ECANCELLED      /* SuSv3 and Solaris wants one 'L' */
 
index 4872e77aa96b784d5a1e19bd7f9c4996b8cd0992..dc77c5a51db774a7c691568c010ce0a4500e7286 100644 (file)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
 #define __NR_statx             (__NR_Linux + 349)
+#define __NR_io_pgetevents     (__NR_Linux + 350)
 
-#define __NR_Linux_syscalls    (__NR_statx + 1)
+#define __NR_Linux_syscalls    (__NR_io_pgetevents + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e0e1c9775c320b46d85da0f2e6ce22bc2275b9fb..5eb979d04b905420e28f63dd526e6ca13aaa9842 100644 (file)
@@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver)
 {
        /* FIXME: we need this because apparently the sti
         * driver can be registered twice */
-       if(driver->drv.name) {
-               printk(KERN_WARNING 
-                      "BUG: skipping previously registered driver %s\n",
-                      driver->name);
+       if (driver->drv.name) {
+               pr_warn("BUG: skipping previously registered driver %s\n",
+                       driver->name);
                return 1;
        }
 
        if (!driver->probe) {
-               printk(KERN_WARNING 
-                      "BUG: driver %s has no probe routine\n",
-                      driver->name);
+               pr_warn("BUG: driver %s has no probe routine\n", driver->name);
                return 1;
        }
 
@@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
        dev = create_parisc_device(mod_path);
        if (dev->id.hw_type != HPHW_FAULTY) {
-               printk(KERN_ERR "Two devices have hardware path [%s].  "
-                               "IODC data for second device: "
-                               "%02x%02x%02x%02x%02x%02x\n"
-                               "Rearranging GSC cards sometimes helps\n",
-                       parisc_pathname(dev), iodc_data[0], iodc_data[1],
-                       iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+               pr_err("Two devices have hardware path [%s].  IODC data for second device: %7phN\n"
+                      "Rearranging GSC cards sometimes helps\n",
+                       parisc_pathname(dev), iodc_data);
                return NULL;
        }
 
@@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
         * the keyboard controller
         */
        if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
-               printk("Unable to claim HPA %lx for device %s\n",
-                               hpa, name);
+               pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
 
        return dev;
 }
@@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
                ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
index e95207c0565eb12308e12d48c45bd5309ae6e2ae..c7508f5717fb511ee463fc9aac83270a00d71cc4 100644 (file)
        /* Release pa_tlb_lock lock without reloading lock address. */
        .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       sync
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
 #endif
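
The added sync acts as a release barrier: on PA-RISC a plain stw is not ordered against earlier memory accesses, so without it the updates done while holding pa_tlb_lock could be reordered past the unlock store. In C this is exactly what smp_store_release() spells out; a sketch of the equivalent, with an illustrative lock variable:

#include <linux/atomic.h>

static inline void example_unlock(int *lock)
{
	/* Orders all earlier loads/stores before the store that releases
	 * the lock, the C analogue of the "sync; stw" sequence above. */
	smp_store_release(lock, 1);
}
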
@@ -764,7 +766,6 @@ END(fault_vector_11)
 #endif
        /* Fault vector is separately protected and *must* be on its own page */
        .align          PAGE_SIZE
-ENTRY(end_fault_vector)
 
        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code
@@ -776,7 +777,6 @@ ENTRY(end_fault_vector)
         */
 
 ENTRY_CFI(ret_from_kernel_thread)
-
        /* Call schedule_tail first though */
        BL      schedule_tail, %r2
        nop
@@ -815,8 +815,9 @@ ENTRY_CFI(_switch_to)
        LDREG   TASK_THREAD_INFO(%r25), %r25
        bv      %r0(%r2)
        mtctl   %r25,%cr30
+ENDPROC_CFI(_switch_to)
 
-_switch_to_ret:
+ENTRY_CFI(_switch_to_ret)
        mtctl   %r0, %cr0               /* Needed for single stepping */
        callee_rest
        callee_rest_float
@@ -824,7 +825,7 @@ _switch_to_ret:
        LDREG   -RP_OFFSET(%r30), %r2
        bv      %r0(%r2)
        copy    %r26, %r28
-ENDPROC_CFI(_switch_to)
+ENDPROC_CFI(_switch_to_ret)
 
        /*
         * Common rfi return path for interruptions, kernel execve, and
@@ -885,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR5(%r16)
        STREG   %r19,PT_SR6(%r16)
        STREG   %r19,PT_SR7(%r16)
+ENDPROC_CFI(syscall_exit_rfi)
 
-intr_return:
+ENTRY_CFI(intr_return)
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
        bb,<,n  %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+ENDPROC_CFI(intr_return)
 
        .import do_notify_resume,code
 intr_check_sig:
@@ -1046,7 +1049,6 @@ intr_extint:
 
        b       do_cpu_irq_mask
        ldo     R%intr_return(%r2), %r2 /* return to intr_return, not here */
-ENDPROC_CFI(syscall_exit_rfi)
 
 
        /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1997,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
        .align L1_CACHE_BYTES
        .globl mcount
        .type  mcount, @function
-ENTRY(mcount)
+ENTRY_CFI(mcount, caller)
 _mcount:
        .export _mcount,data
-       .proc
-       .callinfo caller,frame=0
-       .entry
        /*
         * The 64bit mcount() function pointer needs 4 dwords, of which the
         * first two are free.  We optimize it here and put 2 instructions for
@@ -2024,18 +2023,13 @@ ftrace_stub:
        .dword mcount
        .dword 0 /* code in head.S puts value of global gp here */
 #endif
-       .exit
-       .procend
-ENDPROC(mcount)
+ENDPROC_CFI(mcount)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .align 8
        .globl return_to_handler
        .type  return_to_handler, @function
-ENTRY_CFI(return_to_handler)
-       .proc
-       .callinfo caller,frame=FRAME_SIZE
-       .entry
+ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
        .export parisc_return_to_handler,data
 parisc_return_to_handler:
        copy %r3,%r1
@@ -2074,8 +2068,6 @@ parisc_return_to_handler:
        bv      %r0(%rp)
 #endif
        LDREGM -FRAME_SIZE(%sp),%r3
-       .exit
-       .procend
 ENDPROC_CFI(return_to_handler)
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2085,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
 #ifdef CONFIG_IRQSTACKS
 /* void call_on_stack(unsigned long param1, void *func,
                      unsigned long new_stack) */
-ENTRY_CFI(call_on_stack)
+ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
        copy    %sp, %r1
 
        /* Regarding the HPPA calling conventions for function pointers,
           we assume the PIC register is not changed across call.  For
           CONFIG_64BIT, the argument pointer is left to point at the
           argument region allocated for the call to call_on_stack. */
+
+       /* Switch to new stack.  We allocate two frames.  */
+       ldo     2*FRAME_SIZE(%arg2), %sp
 # ifdef CONFIG_64BIT
-       /* Switch to new stack.  We allocate two 128 byte frames.  */
-       ldo     256(%arg2), %sp
        /* Save previous stack pointer and return pointer in frame marker */
-       STREG   %rp, -144(%sp)
+       STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
        /* Calls always use function descriptor */
        LDREG   16(%arg1), %arg1
        bve,l   (%arg1), %rp
-       STREG   %r1, -136(%sp)
-       LDREG   -144(%sp), %rp
+       STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
+       LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
        bve     (%rp)
-       LDREG   -136(%sp), %sp
+       LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
 # else
-       /* Switch to new stack.  We allocate two 64 byte frames.  */
-       ldo     128(%arg2), %sp
        /* Save previous stack pointer and return pointer in frame marker */
-       STREG   %r1, -68(%sp)
-       STREG   %rp, -84(%sp)
+       STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
+       STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
        /* Calls use function descriptor if PLABEL bit is set */
        bb,>=,n %arg1, 30, 1f
        depwi   0,31,2, %arg1
@@ -2117,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
 1:
        be,l    0(%sr4,%arg1), %sr0, %r31
        copy    %r31, %rp
-       LDREG   -84(%sp), %rp
+       LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
        bv      (%rp)
-       LDREG   -68(%sp), %sp
+       LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
 # endif /* CONFIG_64BIT */
 ENDPROC_CFI(call_on_stack)
 #endif /* CONFIG_IRQSTACKS */
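
The rewrite replaces hard-coded frame offsets with expressions over FRAME_SIZE, RP_OFFSET and REG_SZ, so one code shape serves both widths. A quick check, with values assumed from asm/assembly.h (64-bit: 128/16/8; 32-bit: 64/20/4), that the expressions reproduce the old magic numbers:

#define FRAME_SIZE	128	/* 64-bit parisc frame, assumed */
#define RP_OFFSET	16
#define REG_SZ		8

_Static_assert(-FRAME_SIZE - RP_OFFSET == -144, "old 64-bit %rp slot");
_Static_assert(-FRAME_SIZE - REG_SZ == -136, "old 64-bit save slot");
/* With the 32-bit values the same expressions yield -84 and -68. */
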
index 22e6374ece4417e76fa91ed196973f6df2356f48..f33bf2d306d607fb915c8469471793d9c1ddf2b5 100644 (file)
        .align  16
 
 ENTRY_CFI(flush_tlb_all_local)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        /*
         * The pitlbe and pdtlbe instructions should only be used to
         * flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
 
 2:      bv             %r0(%r2)
        nop
-
-       .exit
-       .procend
 ENDPROC_CFI(flush_tlb_all_local)
 
        .import cache_info,data
 
 ENTRY_CFI(flush_instruction_cache_local)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        load32          cache_info, %r1
 
        /* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
        mtsm            %r22                    /* restore I-bit */
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_instruction_cache_local)
 
 
        .import cache_info, data
 ENTRY_CFI(flush_data_cache_local)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        load32          cache_info, %r1
 
        /* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
        mtsm            %r22                    /* restore I-bit */
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_data_cache_local)
 
 /* Macros to serialize TLB purge operations on SMP.  */
@@ -353,6 +332,7 @@ ENDPROC_CFI(flush_data_cache_local)
        .macro  tlb_unlock      la,flags,tmp
 #ifdef CONFIG_SMP
        ldi             1,\tmp
+       sync
        stw             \tmp,0(\la)
        mtsm            \flags
 #endif
@@ -361,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
 /* Clear page using kernel mapping.  */
 
 ENTRY_CFI(clear_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
 #ifdef CONFIG_64BIT
 
        /* Unroll the loop.  */
@@ -423,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
 #endif
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(clear_page_asm)
 
 /* Copy page using kernel mapping.  */
 
 ENTRY_CFI(copy_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
 #ifdef CONFIG_64BIT
        /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
         * Unroll the loop by hand and arrange insn appropriately.
@@ -541,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
 #endif
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(copy_page_asm)
 
 /*
@@ -597,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
         */
 
 ENTRY_CFI(copy_user_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        /* Convert virtual `to' and `from' addresses to physical addresses.
           Move `from' physical address to non shadowed register.  */
        ldil            L%(__PAGE_OFFSET), %r1
@@ -749,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
 
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(copy_user_page_asm)
 
 ENTRY_CFI(clear_user_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        tophys_r1       %r26
 
        ldil            L%(TMPALIAS_MAP_START), %r28
@@ -835,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
 
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(clear_user_page_asm)
 
 ENTRY_CFI(flush_dcache_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%(TMPALIAS_MAP_START), %r28
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
@@ -902,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_dcache_page_asm)
 
 ENTRY_CFI(flush_icache_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%(TMPALIAS_MAP_START), %r28
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
@@ -976,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_icache_page_asm)
 
 ENTRY_CFI(flush_kernel_dcache_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%dcache_stride, %r1
        ldw             R%dcache_stride(%r1), %r23
 
@@ -1019,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_kernel_dcache_page_asm)
 
 ENTRY_CFI(purge_kernel_dcache_page_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%dcache_stride, %r1
        ldw             R%dcache_stride(%r1), %r23
 
@@ -1061,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(purge_kernel_dcache_page_asm)
 
 ENTRY_CFI(flush_user_dcache_range_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%dcache_stride, %r1
        ldw             R%dcache_stride(%r1), %r23
        ldo             -1(%r23), %r21
@@ -1082,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_user_dcache_range_asm)
 
 ENTRY_CFI(flush_kernel_dcache_range_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%dcache_stride, %r1
        ldw             R%dcache_stride(%r1), %r23
        ldo             -1(%r23), %r21
@@ -1104,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
        syncdma
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
 ENTRY_CFI(purge_kernel_dcache_range_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%dcache_stride, %r1
        ldw             R%dcache_stride(%r1), %r23
        ldo             -1(%r23), %r21
@@ -1126,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
        syncdma
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(purge_kernel_dcache_range_asm)
 
 ENTRY_CFI(flush_user_icache_range_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%icache_stride, %r1
        ldw             R%icache_stride(%r1), %r23
        ldo             -1(%r23), %r21
@@ -1147,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_user_icache_range_asm)
 
 ENTRY_CFI(flush_kernel_icache_page)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%icache_stride, %r1
        ldw             R%icache_stride(%r1), %r23
 
@@ -1190,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(flush_kernel_icache_page)
 
 ENTRY_CFI(flush_kernel_icache_range_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        ldil            L%icache_stride, %r1
        ldw             R%icache_stride(%r1), %r23
        ldo             -1(%r23), %r21
@@ -1211,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
        sync
        bv              %r0(%r2)
        nop
-       .exit
-       .procend
 ENDPROC_CFI(flush_kernel_icache_range_asm)
 
        __INIT
@@ -1222,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
         */
        .align  256
 ENTRY_CFI(disable_sr_hashing_asm)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        /*
         * Switch to real mode
         */
@@ -1307,9 +1186,6 @@ srdis_done:
 
 2:      bv             %r0(%r2)
        nop
-       .exit
-
-       .procend
 ENDPROC_CFI(disable_sr_hashing_asm)
 
        .end
index 6df07ce4f3c2d7be7009a1884aafad7933b920a6..04c48f1ef3fbddcbc63ba8d52cbbe9683441cc5d 100644 (file)
 #include <linux/init.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
-#include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 
 #include <asm/cacheflush.h>
 #include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
@@ -395,7 +394,7 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void *pa11_dma_alloc(struct device *dev, size_t size,
+static void *pcxl_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
        unsigned long vaddr;
@@ -422,190 +421,60 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
        return (void *)vaddr;
 }
 
-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
-       int order;
-
-       order = get_order(size);
-       size = 1 << (order + PAGE_SHIFT);
-       unmap_uncached_pages((unsigned long)vaddr, size);
-       pcxl_free_range((unsigned long)vaddr, size);
-       free_pages((unsigned long)__va(dma_handle), order);
-}
+       void *addr;
 
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size,
-               enum dma_data_direction direction, unsigned long attrs)
-{
-       void *addr = page_address(page) + offset;
-       BUG_ON(direction == DMA_NONE);
+       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+               return NULL;
 
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               flush_kernel_dcache_range((unsigned long) addr, size);
+       addr = (void *)__get_free_pages(flag, get_order(size));
+       if (addr)
+               *dma_handle = (dma_addr_t)virt_to_phys(addr);
 
-       return virt_to_phys(addr);
+       return addr;
 }
 
-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction,
-               unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-       BUG_ON(direction == DMA_NONE);
-
-       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-               return;
 
-       if (direction == DMA_TO_DEVICE)
-               return;
-
-       /*
-        * For PCI_DMA_FROMDEVICE this flush is not necessary for the
-        * simple map/unmap case. However, it IS necessary if if
-        * pci_dma_sync_single_* has been called and the buffer reused.
-        */
-
-       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+       if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
+               return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
+       else
+               return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
 }
 
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction direction,
-               unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, unsigned long attrs)
 {
-       int i;
-       struct scatterlist *sg;
-
-       BUG_ON(direction == DMA_NONE);
+       int order = get_order(size);
 
-       for_each_sg(sglist, sg, nents, i) {
-               unsigned long vaddr = (unsigned long)sg_virt(sg);
+       if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
+               size = 1 << (order + PAGE_SHIFT);
+               unmap_uncached_pages((unsigned long)vaddr, size);
+               pcxl_free_range((unsigned long)vaddr, size);
 
-               sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
-               sg_dma_len(sg) = sg->length;
-
-               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-                       continue;
-
-               flush_kernel_dcache_range(vaddr, sg->length);
+               vaddr = __va(dma_handle);
        }
-       return nents;
-}
-
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction direction,
-               unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       BUG_ON(direction == DMA_NONE);
-
-       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-               return;
-
-       if (direction == DMA_TO_DEVICE)
-               return;
-
-       /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-       for_each_sg(sglist, sg, nents, i)
-               flush_kernel_vmap_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
-               dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-                       size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
-               dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-                       size);
+       free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
 {
-       int i;
-       struct scatterlist *sg;
-
-       /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-       for_each_sg(sglist, sg, nents, i)
-               flush_kernel_vmap_range(sg_virt(sg), sg->length);
+       flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
 {
-       int i;
-       struct scatterlist *sg;
-
-       /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-       for_each_sg(sglist, sg, nents, i)
-               flush_kernel_vmap_range(sg_virt(sg), sg->length);
+       flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
-
-const struct dma_map_ops pcxl_dma_ops = {
-       .alloc =                pa11_dma_alloc,
-       .free =                 pa11_dma_free,
-       .map_page =             pa11_dma_map_page,
-       .unmap_page =           pa11_dma_unmap_page,
-       .map_sg =               pa11_dma_map_sg,
-       .unmap_sg =             pa11_dma_unmap_sg,
-       .sync_single_for_cpu =  pa11_dma_sync_single_for_cpu,
-       .sync_single_for_device = pa11_dma_sync_single_for_device,
-       .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
-       .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
-       .cache_sync =           pa11_dma_cache_sync,
-};
-
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-       void *addr;
-
-       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-               return NULL;
-
-       addr = (void *)__get_free_pages(flag, get_order(size));
-       if (addr)
-               *dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-       return addr;
-}
-
-static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t iova, unsigned long attrs)
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-       return;
-}
-
-const struct dma_map_ops pcx_dma_ops = {
-       .alloc =                pcx_dma_alloc,
-       .free =                 pcx_dma_free,
-       .map_page =             pa11_dma_map_page,
-       .unmap_page =           pa11_dma_unmap_page,
-       .map_sg =               pa11_dma_map_sg,
-       .unmap_sg =             pa11_dma_unmap_sg,
-       .sync_single_for_cpu =  pa11_dma_sync_single_for_cpu,
-       .sync_single_for_device = pa11_dma_sync_single_for_device,
-       .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
-       .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
-       .cache_sync =           pa11_dma_cache_sync,
-};
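
This deletes the two hand-rolled dma_map_ops tables: with dma_noncoherent_ops the generic code supplies the map/unmap/sync entry points and calls back into the small set of arch_* hooks kept above. A sketch of the resulting map-page flow, assumed from kernel/dma/noncoherent.c; the helper name is illustrative:

#include <linux/dma-noncoherent.h>
#include <linux/mm.h>

static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	/* The generic layer flushes via the arch hook before handing
	 * the buffer to the device; parisc DMA addresses are physical. */
	arch_sync_dma_for_device(dev, paddr, size, dir);
	return paddr;
}
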
index b931745815e0f4e752ccd2c0b0342c5b49e69773..eb39e7e380d7e27b24f6bae39ae0e6c3583511e3 100644 (file)
@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
                ip = info.ip;
                if (!in_sched_functions(ip))
                        return ip;
-       } while (count++ < 16);
+       } while (count++ < MAX_UNWIND_ENTRIES);
        return 0;
 }
 
index 7aa1d4d0d4442a50792e2d3f467af106a8febca4..2582df1c529bbcbb00262bba3f4e9534439766df 100644 (file)
@@ -676,3 +676,103 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 #endif
        return &user_parisc_native_view;
 }
+
+
+/* HAVE_REGS_AND_STACK_ACCESS_API feature */
+
+struct pt_regs_offset {
+       const char *name;
+       int offset;
+};
+
+#define REG_OFFSET_NAME(r)    {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+       REG_OFFSET_INDEX(gr,0),
+       REG_OFFSET_INDEX(gr,1),
+       REG_OFFSET_INDEX(gr,2),
+       REG_OFFSET_INDEX(gr,3),
+       REG_OFFSET_INDEX(gr,4),
+       REG_OFFSET_INDEX(gr,5),
+       REG_OFFSET_INDEX(gr,6),
+       REG_OFFSET_INDEX(gr,7),
+       REG_OFFSET_INDEX(gr,8),
+       REG_OFFSET_INDEX(gr,9),
+       REG_OFFSET_INDEX(gr,10),
+       REG_OFFSET_INDEX(gr,11),
+       REG_OFFSET_INDEX(gr,12),
+       REG_OFFSET_INDEX(gr,13),
+       REG_OFFSET_INDEX(gr,14),
+       REG_OFFSET_INDEX(gr,15),
+       REG_OFFSET_INDEX(gr,16),
+       REG_OFFSET_INDEX(gr,17),
+       REG_OFFSET_INDEX(gr,18),
+       REG_OFFSET_INDEX(gr,19),
+       REG_OFFSET_INDEX(gr,20),
+       REG_OFFSET_INDEX(gr,21),
+       REG_OFFSET_INDEX(gr,22),
+       REG_OFFSET_INDEX(gr,23),
+       REG_OFFSET_INDEX(gr,24),
+       REG_OFFSET_INDEX(gr,25),
+       REG_OFFSET_INDEX(gr,26),
+       REG_OFFSET_INDEX(gr,27),
+       REG_OFFSET_INDEX(gr,28),
+       REG_OFFSET_INDEX(gr,29),
+       REG_OFFSET_INDEX(gr,30),
+       REG_OFFSET_INDEX(gr,31),
+       REG_OFFSET_INDEX(sr,0),
+       REG_OFFSET_INDEX(sr,1),
+       REG_OFFSET_INDEX(sr,2),
+       REG_OFFSET_INDEX(sr,3),
+       REG_OFFSET_INDEX(sr,4),
+       REG_OFFSET_INDEX(sr,5),
+       REG_OFFSET_INDEX(sr,6),
+       REG_OFFSET_INDEX(sr,7),
+       REG_OFFSET_INDEX(iasq,0),
+       REG_OFFSET_INDEX(iasq,1),
+       REG_OFFSET_INDEX(iaoq,0),
+       REG_OFFSET_INDEX(iaoq,1),
+       REG_OFFSET_NAME(cr27),
+       REG_OFFSET_NAME(ksp),
+       REG_OFFSET_NAME(kpc),
+       REG_OFFSET_NAME(sar),
+       REG_OFFSET_NAME(iir),
+       REG_OFFSET_NAME(isr),
+       REG_OFFSET_NAME(ior),
+       REG_OFFSET_NAME(ipsw),
+       REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:      the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+       const struct pt_regs_offset *roff;
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (!strcmp(roff->name, name))
+                       return roff->offset;
+       return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset:    the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+       const struct pt_regs_offset *roff;
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (roff->offset == offset)
+                       return roff->name;
+       return NULL;
+}
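
These two lookups are what HAVE_REGS_AND_STACK_ACCESS_API consumers build on, e.g. the kprobe tracer resolving a "%regname" fetch argument. A hypothetical consumer is sketched below; in-tree code would normally go through the regs_get_register() helper in <linux/ptrace.h>:

#include <linux/ptrace.h>

static unsigned long read_reg_by_name(struct pt_regs *regs, const char *name)
{
	int offs = regs_query_register_offset(name);

	if (offs < 0)
		return 0;	/* unknown register name */
	return *(unsigned long *)((char *)regs + offs);
}
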
index cc9963421a19377f3faea08bf2686611a0856816..2b16d8d6598f1d6015795a4455a24a4c039d0108 100644 (file)
@@ -35,12 +35,6 @@ real32_stack:
 real64_stack:
        .block  8192
 
-#ifdef CONFIG_64BIT
-#  define REG_SZ 8
-#else
-#  define REG_SZ 4
-#endif
-
 #define N_SAVED_REGS 9
 
 save_cr_space:
index 8d3a7b80ac4286c209563051d4e6cf09bebad08d..4e87c35c22b7215722aebbf6a980e2706a15c44a 100644 (file)
@@ -97,14 +97,12 @@ void __init dma_ops_init(void)
                panic(  "PA-RISC Linux currently only supports machines that conform to\n"
                        "the PA-RISC 1.1 or 2.0 architecture specification.\n");
 
-       case pcxs:
-       case pcxt:
-               hppa_dma_ops = &pcx_dma_ops;
-               break;
        case pcxl2:
                pa7300lc_init();
        case pcxl: /* falls through */
-               hppa_dma_ops = &pcxl_dma_ops;
+       case pcxs:
+       case pcxt:
+               hppa_dma_ops = &dma_noncoherent_ops;
                break;
        default:
                break;
index e775f80ae28c5ab8d7fac7456235fa4f415c7215..5f7e57fcaeef0333da7482f8af566a0e4da7fc00 100644 (file)
@@ -629,11 +629,12 @@ cas_action:
        stw     %r1, 4(%sr2,%r20)
 #endif
        /* The load and store could fail */
-1:     ldw,ma  0(%r26), %r28
+1:     ldw     0(%r26), %r28
        sub,<>  %r28, %r25, %r0
-2:     stw,ma  %r24, 0(%r26)
+2:     stw     %r24, 0(%r26)
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
@@ -647,6 +648,7 @@ cas_action:
 3:             
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
@@ -796,30 +798,30 @@ cas2_action:
        ldo     1(%r0),%r28
 
        /* 8bit CAS */
-13:    ldb,ma  0(%r26), %r29
+13:    ldb     0(%r26), %r29
        sub,=   %r29, %r25, %r0
        b,n     cas2_end
-14:    stb,ma  %r24, 0(%r26)
+14:    stb     %r24, 0(%r26)
        b       cas2_end
        copy    %r0, %r28
        nop
        nop
 
        /* 16bit CAS */
-15:    ldh,ma  0(%r26), %r29
+15:    ldh     0(%r26), %r29
        sub,=   %r29, %r25, %r0
        b,n     cas2_end
-16:    sth,ma  %r24, 0(%r26)
+16:    sth     %r24, 0(%r26)
        b       cas2_end
        copy    %r0, %r28
        nop
        nop
 
        /* 32bit CAS */
-17:    ldw,ma  0(%r26), %r29
+17:    ldw     0(%r26), %r29
        sub,=   %r29, %r25, %r0
        b,n     cas2_end
-18:    stw,ma  %r24, 0(%r26)
+18:    stw     %r24, 0(%r26)
        b       cas2_end
        copy    %r0, %r28
        nop
@@ -827,10 +829,10 @@ cas2_action:
 
        /* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:    ldd,ma  0(%r26), %r29
+19:    ldd     0(%r26), %r29
        sub,*=  %r29, %r25, %r0
        b,n     cas2_end
-20:    std,ma  %r24, 0(%r26)
+20:    std     %r24, 0(%r26)
        copy    %r0, %r28
 #else
        /* Compare first word */
@@ -848,7 +850,8 @@ cas2_action:
 
 cas2_end:
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
        /* Enable interrupts */
        ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
@@ -858,6 +861,7 @@ cas2_end:
 22:
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
        ssm     PSW_SM_I, %r0
        ldo     1(%r0),%r28
index 6308749359e4b7d6ee348062d584f7b747f1a115..fe3f2a49d2b1063a93daa0a9d4077d2978c5bdaf 100644 (file)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
        ENTRY_SAME(statx)
+       ENTRY_COMP(io_pgetevents)       /* 350 */
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 4309ad31a8743d5b39bfae82095e8a08b7cd0532..318815212518962bec3bf8391abd791ad30f6879 100644 (file)
@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
        int i = 1;
 
        printk(KERN_CRIT "Backtrace:\n");
-       while (i <= 16) {
+       while (i <= MAX_UNWIND_ENTRIES) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;
 
index 143f90e2f9f3c631616d4af52f0fe3fa08f44af9..5cdf13069dd99d41e3f4d2061e4d82474296e80f 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kallsyms.h>
 #include <linux/sort.h>
 
 #include <linux/uaccess.h>
@@ -25,7 +24,7 @@
 
 /* #define DEBUG 1 */
 #ifdef DEBUG
-#define dbg(x...) printk(x)
+#define dbg(x...) pr_debug(x)
 #else
 #define dbg(x...)
 #endif
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
        for (; start <= end; start++) {
                if (start < end && 
                    start->region_end > (start+1)->region_start) {
-                       printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+                       pr_warn("Out of order unwind entry! %px and %px\n",
+                               start, start+1);
                }
 
                start->region_start += base_addr;
@@ -182,7 +182,7 @@ int __init unwind_init(void)
        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];
 
-       printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+       dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));
 
@@ -203,25 +203,60 @@ int __init unwind_init(void)
        return 0;
 }
 
-#ifdef CONFIG_64BIT
-#define get_func_addr(fptr) fptr[2]
-#else
-#define get_func_addr(fptr) fptr[0]
-#endif
-
 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
 {
-       extern void handle_interruption(int, struct pt_regs *);
-       static unsigned long *hi = (unsigned long *)&handle_interruption;
-
-       if (pc == get_func_addr(hi)) {
+	/*
+	 * We have to use void * instead of a function pointer, because
+	 * on 64-bit a function pointer doesn't point at the function
+	 * itself.  Make them const so the compiler knows they live in
+	 * .text.
+	 */
+       extern void * const handle_interruption;
+       extern void * const ret_from_kernel_thread;
+       extern void * const syscall_exit;
+       extern void * const intr_return;
+       extern void * const _switch_to_ret;
+#ifdef CONFIG_IRQSTACKS
+       extern void * const call_on_stack;
+#endif /* CONFIG_IRQSTACKS */
+
+       if (pc == (unsigned long) &handle_interruption) {
                struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
                dbg("Unwinding through handle_interruption()\n");
                info->prev_sp = regs->gr[30];
                info->prev_ip = regs->iaoq[0];
+               return 1;
+       }
+
+       if (pc == (unsigned long) &ret_from_kernel_thread ||
+           pc == (unsigned long) &syscall_exit) {
+               info->prev_sp = info->prev_ip = 0;
+               return 1;
+       }
+
+       if (pc == (unsigned long) &intr_return) {
+               struct pt_regs *regs;
+
+               dbg("Found intr_return()\n");
+               regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
+               info->prev_sp = regs->gr[30];
+               info->prev_ip = regs->iaoq[0];
+               info->rp = regs->gr[2];
+               return 1;
+       }
+
+       if (pc == (unsigned long) &_switch_to_ret) {
+               info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+               info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+               return 1;
+       }
 
+#ifdef CONFIG_IRQSTACKS
+       if (pc == (unsigned long) &call_on_stack) {
+               info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+               info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
                return 1;
        }
+#endif
 
        return 0;
 }
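
The unusual "extern void * const" declarations deserve a note: on 64-bit parisc a C function pointer points at a function descriptor, not at the code, so comparing info->ip against &some_function would never match. Declaring the assembly entry points as const data objects makes &symbol yield the .text address directly. A minimal sketch of the trick:

/* Entry point defined in entry.S; declared as data so that &intr_return
 * is the code address itself rather than a function descriptor. */
extern void * const intr_return;

static int pc_is_intr_return(unsigned long pc)
{
	return pc == (unsigned long)&intr_return;
}
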
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
        if (e == NULL) {
                unsigned long sp;
 
-               dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
-
-#ifdef CONFIG_KALLSYMS
-               /* Handle some frequent special cases.... */
-               {
-                       char symname[KSYM_NAME_LEN];
-                       char *modname;
-
-                       kallsyms_lookup(info->ip, NULL, NULL, &modname,
-                               symname);
-
-                       dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
-
-                       if (strcmp(symname, "_switch_to_ret") == 0) {
-                               info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
-                               info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
-                               dbg("_switch_to_ret @ %lx - setting "
-                                   "prev_sp=%lx prev_ip=%lx\n", 
-                                   info->ip, info->prev_sp, 
-                                   info->prev_ip);
-                               return;
-                       } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
-                                  strcmp(symname, "syscall_exit") == 0) {
-                               info->prev_ip = info->prev_sp = 0;
-                               return;
-                       }
-               }
-#endif
+               dbg("Cannot find unwind entry for %pS; forced unwinding\n",
+                       (void *) info->ip);
 
                /* Since we are doing the unwinding blind, we don't know if
                   we are adjusting the stack correctly or extracting the rp
@@ -439,8 +448,8 @@ unsigned long return_address(unsigned int level)
        /* initialize unwind info */
        asm volatile ("copy %%r30, %0" : "=r"(sp));
        memset(&r, 0, sizeof(struct pt_regs));
-       r.iaoq[0] = (unsigned long) current_text_addr();
-       r.gr[2] = (unsigned long) __builtin_return_address(0);
+       r.iaoq[0] = _THIS_IP_;
+       r.gr[2] = _RET_IP_;
        r.gr[30] = sp;
        unwind_frame_init(&info, current, &r);
 
index d4fe19806d57764144e0b52e9feddc83a18fc160..b53fb6fedf06c466f8ad8ed738c49171a4bcdda5 100644 (file)
@@ -64,9 +64,6 @@
         */
 
 ENTRY_CFI(lclear_user)
-       .proc
-       .callinfo NO_CALLS
-       .entry
        comib,=,n   0,%r25,$lclu_done
        get_sr
 $lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
        ldo         1(%r25),%r25
 
        ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
-
-       .exit
 ENDPROC_CFI(lclear_user)
 
 
-       .procend
-
        /*
         * long lstrnlen_user(char *s, long n)
         *
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
         */
 
 ENTRY_CFI(lstrnlen_user)
-       .proc
-       .callinfo NO_CALLS
-       .entry
        comib,=     0,%r25,$lslen_nzero
        copy        %r26,%r24
        get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
 $lslen_done:
        bv          %r0(%r2)
        sub         %r26,%r24,%r28
-       .exit
 
 $lslen_nzero:
        b           $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
 
 ENDPROC_CFI(lstrnlen_user)
 
-       .procend
-
-
 
 /*
  * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
        save_len = r31
 
 ENTRY_CFI(pa_memcpy)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-
        /* Last destination address */
        add     dst,len,end
 
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
        b       .Lcopy_done
 10:    stw,ma  t1,4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
-
-       .exit
 ENDPROC_CFI(pa_memcpy)
-       .procend
 
        .end
index 2607d2d33405fb422ca7ef1e9bf9b0a0df6f3aa9..74842d28a7a16e9deeda1ff3a38446605805b04a 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/gfp.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/pci.h>         /* for hppa_dma_ops and pcxl_dma_ops */
 #include <linux/initrd.h>
 #include <linux/swap.h>
 #include <linux/unistd.h>
@@ -616,17 +615,13 @@ void __init mem_init(void)
        free_all_bootmem();
 
 #ifdef CONFIG_PA11
-       if (hppa_dma_ops == &pcxl_dma_ops) {
+       if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
                                                + PCXL_DMA_MAP_SIZE);
-       } else {
-               pcxl_dma_start = 0;
-               parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
-       }
-#else
-       parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+       } else
 #endif
+               parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
 
        mem_init_print_info(NULL);
 
index bd06a3ccda312a0a645cd0dbff887924f691d2ce..fb96206de3175d65f86a63d9a7db0a815300a791 100644 (file)
@@ -243,7 +243,9 @@ endif
 cpu-as-$(CONFIG_4xx)           += -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)       += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)          += -Wa,-me200
+cpu-as-$(CONFIG_E500)          += -Wa,-me500
 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)    += $(call as-option,-Wa$(comma)-me500mc)
 
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
index 682b3e6a1e212d7ab0c38246ec7168ca7fbdf805..963abf8bf1c0e52c66478b0fe597f74ada7520d3 100644 (file)
  * a "bne-" instruction at the end, so an isync is enough as an acquire
  * barrier on platforms without lwsync.
  */
-#define __atomic_op_acquire(op, args...)                               \
-({                                                                     \
-       typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);         \
-       __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");    \
-       __ret;                                                          \
-})
-
-#define __atomic_op_release(op, args...)                               \
-({                                                                     \
-       __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");    \
-       op##_relaxed(args);                                             \
-})
+#define __atomic_acquire_fence()                                       \
+       __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence()                                       \
+       __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
 
 static __inline__ int atomic_read(const atomic_t *v)
 {
@@ -129,8 +122,6 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
        int t;
@@ -145,6 +136,7 @@ static __inline__ void atomic_inc(atomic_t *v)
        : "r" (&v->counter)
        : "cc", "xer");
 }
+#define atomic_inc atomic_inc
 
 static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
 {
@@ -163,16 +155,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
        return t;
 }
 
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
 static __inline__ void atomic_dec(atomic_t *v)
 {
        int t;
@@ -187,6 +169,7 @@ static __inline__ void atomic_dec(atomic_t *v)
        : "r" (&v->counter)
        : "cc", "xer");
 }
+#define atomic_dec atomic_dec
 
 static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
 {
@@ -218,7 +201,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -226,13 +209,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int t;
 
        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
-"1:    lwarx   %0,0,%1         # __atomic_add_unless\n\
+"1:    lwarx   %0,0,%1         # atomic_fetch_add_unless\n\
        cmpw    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
@@ -248,6 +231,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
        return t;
 }
+#define atomic_fetch_add_unless atomic_fetch_add_unless
 
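
Note the semantic shift behind the rename: the old add_unless helpers advertised a success flag in places (see the "return t != u" removed from the 64-bit variant below), while atomic_fetch_add_unless() uniformly returns the old value. The generic layer then derives the boolean helpers from it, roughly as below (shape assumed from include/linux/atomic.h):

#include <linux/atomic.h>
#include <linux/types.h>

/* The boolean helpers are now derived from the fetch primitive instead
 * of each arch providing them separately. */
static inline bool example_add_unless(atomic_t *v, int a, int u)
{
	return atomic_fetch_add_unless(v, a, u) != u;
}

static inline bool example_inc_not_zero(atomic_t *v)
{
	return example_add_unless(v, 1, 0);
}
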
 /**
  * atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +264,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
 }
 #define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
 
-#define atomic_sub_and_test(a, v)      (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_dec_return((v)) == 0)
-
 /*
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1, even if
@@ -412,8 +393,6 @@ ATOMIC64_OPS(xor, xor)
 #undef ATOMIC64_OP_RETURN_RELAXED
 #undef ATOMIC64_OP
 
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
        long t;
@@ -427,6 +406,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
        : "r" (&v->counter)
        : "cc", "xer");
 }
+#define atomic64_inc atomic64_inc
 
 static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
 {
@@ -444,16 +424,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
        return t;
 }
 
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
 static __inline__ void atomic64_dec(atomic64_t *v)
 {
        long t;
@@ -467,6 +437,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
        : "r" (&v->counter)
        : "cc", "xer");
 }
+#define atomic64_dec atomic64_dec
 
 static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
 {
@@ -487,9 +458,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
 #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
 #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
 
-#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-
 /*
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1.
@@ -513,6 +481,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 
        return t;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +493,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -532,13 +501,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
        long t;
 
        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%1         # __atomic_add_unless\n\
+"1:    ldarx   %0,0,%1         # atomic64_fetch_add_unless\n\
        cmpd    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
@@ -551,8 +520,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");
 
-       return t != u;
+       return t;
 }
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 /**
  * atomic64_inc_not_zero - increment unless the number is zero
@@ -582,6 +552,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 
        return t1 != 0;
 }
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
 
 #endif /* __powerpc64__ */
 
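The two fence macros introduced at the top of this file replace the per-arch __atomic_op_acquire/__atomic_op_release wrappers: the generic layer now composes an acquire or release operation from the _relaxed variant plus the arch fence, roughly as below (shape assumed from include/linux/atomic.h):

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})
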
index 6a6673907e45eeb934e66023e8630fe21d8fd31d..82e44b1a00ae91219f482afa654f2d0440c5aa78 100644 (file)
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
index af5f2baac80f991951ac77dc3b3eaeb1e72aee46..a069dfcac9a94a94efe66a162cbbff88f1596934 100644 (file)
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)         (hugepd_ok(hpd))
 
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache.
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+       (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+       switch (index) {
+       case H_16M_CACHE_INDEX:
+               return HTLB_16M_INDEX;
+       case H_16G_CACHE_INDEX:
+               return HTLB_16G_INDEX;
+       default:
+               BUG();
+       }
+	/* should never be reached */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index fb4b3ba52339e9233207ce7345e2f9d920835f97..d7ee249d6890cb30fcf10ebd608665d57ba2f781 100644 (file)
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
        return 0;
 }
+
 #define is_hugepd(pdep)                        0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+       BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index 63cee159022b51400fbc52dd21ebd31f55f3db67..42aafba7a30834db7643213a3aec583a3cdd1b6a 100644 (file)
@@ -287,6 +287,11 @@ enum pgtable_index {
        PMD_INDEX,
        PUD_INDEX,
        PGD_INDEX,
+       /*
+        * Below are used with 4k page size and hugetlb
+        */
+       HTLB_16M_INDEX,
+       HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
index 8e7b09703ca45dbe2592441e5520176024baee24..27d6e3c8fde9b92eb489be59ed27769edb30c939 100644 (file)
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
 #include <asm/reg.h>
 #include <asm/debug.h>
 
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
 
 extern int hw_breakpoint_slots(int type);
 extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                                unsigned long val, void *data);
 int arch_install_hw_breakpoint(struct perf_event *bp);
index 9f3be5c8a4a391a10d7e4d4367313998b41c255d..785c464b65888d575fce38c0732250a5bbea81f8 100644 (file)
@@ -88,7 +88,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_saved_msr;
-       struct pt_regs jprobe_saved_regs;
        struct prev_kprobe prev_kprobe;
 };
 
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                          struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                 struct kprobe_ctlblk *kcb)
-{
-       return 0;
-}
-#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
index 896efa55999694cdad22f92615d073468d7cfe3b..b2f89b621b159148c6d836e3c9c3d47a89aafe47 100644 (file)
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa);
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa);
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
        int c;
 
-       c = atomic_dec_if_positive(&mm->context.copros);
-
-       /* Detect imbalance between add and remove */
-       WARN_ON(c < 0);
-
        /*
-        * Need to broadcast a global flush of the full mm before
-        * decrementing active_cpus count, as the next TLBI may be
-        * local and the nMMU and/or PSL need to be cleaned up.
-        * Should be rare enough so that it's acceptable.
+        * When removing the last copro, we need to broadcast a global
+        * flush of the full mm, as the next TLBI may be local and the
+        * nMMU and/or PSL need to be cleaned up.
+        *
+        * Both the 'copros' and 'active_cpus' counts are looked at in
+        * flush_all_mm() to determine the scope (local/global) of the
+        * TLBIs, so we need to flush first before decrementing
+        * 'copros'. If this API is used by several callers for the
+        * same context, it can lead to over-flushing. It's hopefully
+        * not common enough to be a problem.
         *
         * Skip on hash, as we don't know how to do the proper flush
         * for the time being. Invalidations will remain global if
-        * used on hash.
+        * used on hash. Note that we can't drop 'copros' either, as
+        * it could make some invalidations local with no flush
+        * in-between.
         */
-       if (c == 0 && radix_enabled()) {
+       if (radix_enabled()) {
                flush_all_mm(mm);
-               dec_mm_active_cpus(mm);
+
+               c = atomic_dec_if_positive(&mm->context.copros);
+               /* Detect imbalance between add and remove */
+               WARN_ON(c < 0);
+
+               if (c == 0)
+                       dec_mm_active_cpus(mm);
        }
 }
 #else
index 0f571e0ebca19ccdc8b89540324ccc71849b75e5..bd9ba8defd7258ab6e853be0c39d7290f9f02393 100644 (file)
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
index 1707781d2f208096517859d94f30c6533e0a3771..8825953c225b2e48e9e0cd7938d2185b5e821977 100644 (file)
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)      (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
        tlb_flush_pgtable(tlb, address);
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
index 0e693f322cb2e03a353e3803517820f4c324498b..e2d62d033708c4494a5e95d941b8d34cad3ec3e0 100644 (file)
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
        }
 }
 
+#define get_hugepd_cache_index(x)      (x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
index cfcf6a874cfab3a094d4c931bdc7cade28184665..01b5171ea189994ab394685f8f4645a3fd86594c 100644 (file)
@@ -393,3 +393,4 @@ SYSCALL(pkey_alloc)
 SYSCALL(pkey_free)
 SYSCALL(pkey_mprotect)
 SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
index 1e9708632dce30e1093d48dbed2db8d0d90a4e89..c19379f0a32e2b0fe59a9634140582a8afbc291e 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            388
+#define NR_syscalls            389
 
 #define __NR__exit __NR_exit
 
index ac5ba55066dd76a26f133d91623309036bcad4c8..985534d0b448b7ae7b9d4cad7c3f9257d4ce0789 100644 (file)
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
 #define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
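
A small sanity sketch of the numbering rule these two hunks maintain: syscall numbers index the dispatch table, so NR_syscalls must be the highest assigned number plus one. The values are copied from the hunks above; the check itself is illustrative.

#include <assert.h>

#define __NR_io_pgetevents 388
#define NR_syscalls        389

int main(void)
{
        /* table slots run 0..__NR_io_pgetevents, i.e. NR_syscalls entries */
        assert(NR_syscalls == __NR_io_pgetevents + 1);
        return 0;
}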
index 4be1c0de9406b159eede5503b3a8044645dac7fa..96dd3d871986428dadcbc9bb350c1b876fde8ab4 100644 (file)
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       } else /* DD2.1 and up have DD2_1 */
+       } else if ((version & 0xffff0000) == 0x004e0000)
+               /* DD2.1 and up have DD2_1 */
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 
        if ((version & 0xffff0000) == 0x004e0000) {
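
A hypothetical standalone model of the check being fixed: the bare 'else' previously tagged every CPU that failed the earlier DD2.0/DD2.1 tests as DD2.1, including non-POWER9 parts, while the fix gates the fallback on the POWER9 PVR family (0x004e). PVR_FAMILY and the sample value below are assumptions for illustration.

#include <stdio.h>

#define PVR_FAMILY(v)   ((v) & 0xffff0000)
#define PVR_POWER9      0x004e0000

int main(void)
{
        unsigned int version = 0x004e1202;      /* an assumed POWER9 PVR */

        if (PVR_FAMILY(version) == PVR_POWER9)
                printf("POWER9 family: apply the DD2_1 fallback\n");
        else
                printf("other CPU: leave the feature bits alone\n");
        return 0;
}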
index 80547dad37daee37e4f05a211802ba981a515ab3..fec8a6773119ff809524ba0119ef079189f7442a 100644 (file)
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
 /*
  * Check for virtual address in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       return is_kernel_addr(info->address);
+       return is_kernel_addr(hw->address);
 }
 
 int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
        int ret = -EINVAL, length_max;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
        if (!bp)
                return ret;
 
-       info->type = HW_BRK_TYPE_TRANSLATE;
-       if (bp->attr.bp_type & HW_BREAKPOINT_R)
-               info->type |= HW_BRK_TYPE_READ;
-       if (bp->attr.bp_type & HW_BREAKPOINT_W)
-               info->type |= HW_BRK_TYPE_WRITE;
-       if (info->type == HW_BRK_TYPE_TRANSLATE)
+       hw->type = HW_BRK_TYPE_TRANSLATE;
+       if (attr->bp_type & HW_BREAKPOINT_R)
+               hw->type |= HW_BRK_TYPE_READ;
+       if (attr->bp_type & HW_BREAKPOINT_W)
+               hw->type |= HW_BRK_TYPE_WRITE;
+       if (hw->type == HW_BRK_TYPE_TRANSLATE)
                /* must set at least read or write */
                return ret;
-       if (!(bp->attr.exclude_user))
-               info->type |= HW_BRK_TYPE_USER;
-       if (!(bp->attr.exclude_kernel))
-               info->type |= HW_BRK_TYPE_KERNEL;
-       if (!(bp->attr.exclude_hv))
-               info->type |= HW_BRK_TYPE_HYP;
-       info->address = bp->attr.bp_addr;
-       info->len = bp->attr.bp_len;
+       if (!attr->exclude_user)
+               hw->type |= HW_BRK_TYPE_USER;
+       if (!attr->exclude_kernel)
+               hw->type |= HW_BRK_TYPE_KERNEL;
+       if (!attr->exclude_hv)
+               hw->type |= HW_BRK_TYPE_HYP;
+       hw->address = attr->bp_addr;
+       hw->len = attr->bp_len;
 
        /*
         * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        if (cpu_has_feature(CPU_FTR_DAWR)) {
                length_max = 512; /* 64 doublewords */
                /* DAWR region can't cross 512 boundary */
-               if ((bp->attr.bp_addr >> 9) !=
-                   ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+               if ((attr->bp_addr >> 9) !=
+                   ((attr->bp_addr + attr->bp_len - 1) >> 9))
                        return -EINVAL;
        }
-       if (info->len >
-           (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+       if (hw->len >
+           (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
                return -EINVAL;
        return 0;
 }
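
A worked sketch of the DAWR boundary test above: shifting both ends of the watched range right by 9 compares their 512-byte block numbers, so the check rejects any range that crosses a 512-byte boundary. Standalone arithmetic with assumed addresses.

#include <stdio.h>

static int crosses_512(unsigned long addr, unsigned long len)
{
        return (addr >> 9) != ((addr + len - 1) >> 9);
}

int main(void)
{
        printf("%d\n", crosses_512(0x1f8, 8));  /* 0x1f8..0x1ff fits: 0 */
        printf("%d\n", crosses_512(0x1fc, 8));  /* spills past 0x200: 1 */
        return 0;
}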
index e734f6e45abc1ecb64cc8fe68b88054210e30bd3..689306118b48641495ea3b7ad9f7b058b4ce14c8 100644 (file)
@@ -144,7 +144,9 @@ power9_restore_additional_sprs:
        mtspr   SPRN_MMCR1, r4
 
        ld      r3, STOP_MMCR2(r13)
+       ld      r4, PACA_SPRG_VDSO(r13)
        mtspr   SPRN_MMCR2, r3
+       mtspr   SPRN_SPRG3, r4
        blr
 
 /*
index 7a1f99f1b47fa50436519f6c2eb1301c3bb5fc19..e4a49c051325d59ce026b2a415fde8c18f1263ce 100644 (file)
 #include <linux/preempt.h>
 #include <linux/ftrace.h>
 
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
-       if (!preemptible()) {
-               struct kprobe *p = raw_cpu_read(current_kprobe);
-               return (p && (unsigned long)p->addr == addr) ? 1 : 0;
-       }
-
-       return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                     struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
-       /*
-        * Emulate singlestep (and also recover regs->nip)
-        * as if there is a nop
-        */
-       regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-       if (orig_nip)
-               regs->nip = orig_nip;
-       return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                   struct kprobe_ctlblk *kcb)
-{
-       if (kprobe_ftrace(p))
-               return __skip_singlestep(p, regs, kcb, 0);
-       else
-               return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
 /* Ftrace callback handler for kprobes */
 void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                           struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
-       preempt_disable();
-
        p = get_kprobe((kprobe_opcode_t *)nip);
        if (unlikely(!p) || kprobe_disabled(p))
-               goto end;
+               return;
 
        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
-               unsigned long orig_nip = regs->nip;
-
                /*
                 * On powerpc, NIP is *before* this instruction for the
                 * pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               if (!p->pre_handler || !p->pre_handler(p, regs))
-                       __skip_singlestep(p, regs, kcb, orig_nip);
-               else {
+               if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        /*
-                        * If pre_handler returns !0, it sets regs->nip and
-                        * resets current kprobe. In this case, we should not
-                        * re-enable preemption.
+                        * Emulate singlestep (and also recover regs->nip)
+                        * as if there is a nop
                         */
-                       return;
+                       regs->nip += MCOUNT_INSN_SIZE;
+                       if (unlikely(p->post_handler)) {
+                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                               p->post_handler(p, regs, 0);
+                       }
                }
+               /*
+                * If pre_handler returns !0, it changes regs->nip. We have to
+                * skip emulating post_handler.
+                */
+               __this_cpu_write(current_kprobe, NULL);
        }
-end:
-       preempt_enable_no_resched();
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
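
A minimal model of the "emulate singlestep as if there is a nop" step in the rewritten handler: rather than single-stepping the probed mcount site, the handler advances the saved NIP past it. The pt_regs here is a hypothetical stand-in, not the kernel's.

#include <stdio.h>

#define MCOUNT_INSN_SIZE 4

struct pt_regs { unsigned long long nip; };

static void emulate_nop(struct pt_regs *regs)
{
        regs->nip += MCOUNT_INSN_SIZE;  /* as if the probe site were a nop */
}

int main(void)
{
        struct pt_regs regs = { .nip = 0xc000000000001000ULL };
        emulate_nop(&regs);
        printf("nip = 0x%llx\n", regs.nip);     /* ...1004 */
        return 0;
}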
index e4c5bf33970bf8d0c025f5c20a968ec5a0f80436..5c60bb0f927f819fea2d57aa98d7ad0a7eec5ebf 100644 (file)
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
                        }
                        prepare_singlestep(p, regs);
                        return 1;
-               } else {
-                       if (*addr != BREAKPOINT_INSTRUCTION) {
-                               /* If trap variant, then it belongs not to us */
-                               kprobe_opcode_t cur_insn = *addr;
-                               if (is_trap(cur_insn))
-                                       goto no_kprobe;
-                               /* The breakpoint instruction was removed by
-                                * another cpu right after we hit, no further
-                                * handling of this interrupt is appropriate
-                                */
-                               ret = 1;
+               } else if (*addr != BREAKPOINT_INSTRUCTION) {
+                       /* If trap variant, then it does not belong to us */
+                       kprobe_opcode_t cur_insn = *addr;
+
+                       if (is_trap(cur_insn))
                                goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               if (!skip_singlestep(p, regs, kcb))
-                                       goto ss_probe;
-                               ret = 1;
-                       }
+                       /* The breakpoint instruction was removed by
+                        * another cpu right after we hit, no further
+                        * handling of this interrupt is appropriate
+                        */
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
                         */
                        kprobe_opcode_t cur_insn = *addr;
                        if (is_trap(cur_insn))
-                               goto no_kprobe;
+                               goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
 
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /* handler has already set things up, so skip ss setup */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               /* handler changed execution path, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);
 
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-       /* setup return addr to the jprobe handler routine */
-       regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
-       regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
-       regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of the
-        * function graph tracer.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
-       asm volatile("jprobe_return_trap:\n"
-                    "trap\n"
-                    ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
-               pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
-                               regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
-               return 0;
-       }
-
-       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-       /* It's OK to start function graph tracing again */
-       unpause_graph_tracing();
-       preempt_enable_no_resched();
-       return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
index fe9733ffffaa426f62f2559f66a6771833d0961e..471aac313b8995aa73dce880b8f98d715e59f8d9 100644 (file)
@@ -42,6 +42,8 @@
 #include <asm/ppc-pci.h>
 #include <asm/eeh.h>
 
+#include "../../../drivers/pci/pci.h"
+
 /* hose_spinlock protects accesses to the phb_bitmap. */
 static DEFINE_SPINLOCK(hose_spinlock);
 LIST_HEAD(hose_list);
@@ -1014,7 +1016,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
                /* Cardbus can call us to add new devices to a bus, so ignore
                 * those that are already fully discovered
                 */
-               if (dev->is_added)
+               if (pci_dev_is_added(dev))
                        continue;
 
                pcibios_setup_device(dev);
index 4f861055a8521276c89c71cd67c41425c38c0ac2..d63b488d34d79033fa7229bfeb4d306cf6b56bc0 100644 (file)
@@ -285,9 +285,6 @@ pci_bus_to_hose(int bus)
  * Note that the returned IO or memory base is a physical address
  */
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which,
                unsigned long, bus, unsigned long, devfn)
 {
@@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which,
 
        return result;
 }
-#pragma GCC diagnostic pop
index 812171c09f42fecf2c97757e37f7ad45ec9a35d8..dff28f90351245d58f6b77130fb26fcb73351c5d 100644 (file)
@@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
 #define IOBASE_ISA_IO          3
 #define IOBASE_ISA_MEM         4
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
                          unsigned long, in_devfn)
 {
@@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
 
        return -EOPNOTSUPP;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_NUMA
 int pcibus_to_node(struct pci_bus *bus)
index 7fb9f83dcde889f8340daa94ec66cc6b3cb1804b..8afd146bc9c70dc6480e2fff20d6239d327e33d3 100644 (file)
@@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
 }
 
 /* We assume to be passed big endian arguments */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 {
        struct rtas_args args;
@@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 
        return 0;
 }
-#pragma GCC diagnostic pop
 
 /*
  * Call early during boot, before mem init, to retrieve the RTAS
index 62b1a40d895777a10b3c7279fde05583ae3dc66b..40b44bb53a4efbb8b25c64786262e0123a3da640 100644 (file)
@@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport);
 static int ppc_panic_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
+       /*
+        * panic does a local_irq_disable, but we really
+        * want interrupts to be hard disabled.
+        */
+       hard_irq_disable();
+
        /*
         * If firmware-assisted dump has been registered then trigger
         * firmware-assisted dump and let firmware handle everything else.
         */
        crash_fadump(NULL, ptr);
-       ppc_md.panic(ptr);  /* May not return */
+       if (ppc_md.panic)
+               ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
 }
 
@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {
 
 void __init setup_panic(void)
 {
-       if (!ppc_md.panic)
+       /* PPC64 always does a hard irq disable in its panic handler */
+       if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
                return;
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
 }
index 7a7ce8ad455e1533498fc3c7a5d8a853abb4d9cd..225bc5f91049436277e7c45787d8a7370d6dac78 100644 (file)
@@ -387,6 +387,14 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
+void panic_smp_self_stop(void)
+{
+       hard_irq_disable();
+       spin_begin();
+       while (1)
+               spin_cpu_relax();
+}
+
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
index 17fe4339ba596150e8dc2b0eaf698aee3303ad18..b3e8db376ecde459bb8b5a1cd00b10c9606df289 100644 (file)
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
        /* Re-enable the breakpoints for the signal stack */
        thread_change_pc(tsk, tsk->thread.regs);
 
-       rseq_signal_deliver(tsk->thread.regs);
+       rseq_signal_deliver(&ksig, tsk->thread.regs);
 
        if (is32) {
                if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index 5eedbb282d42fcf2caed7f3d09d0227b2e9e0734..e6474a45cef50623be68bc1fbf0b83635275dceb 100644 (file)
@@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
 }
 #endif
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                       struct ucontext __user *, new_ctx, int, ctx_size)
@@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
@@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
        return 0;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC32
 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
                         int, ndbg, struct sig_dbg_op __user *, dbg)
@@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
        return 0;
 }
 #endif
-#pragma GCC diagnostic pop
 
 /*
  * OK, we're invoking a handler
index d42b600203892d57d7fb3398f7cad38090df9ce6..83d51bf586c7e1ec3697a424a33a1559579147b8 100644 (file)
@@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
 /*
  * Handle {get,set,swap}_context operations
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                struct ucontext __user *, new_ctx, long, ctx_size)
 {
@@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 
 /*
index 5eadfffabe35134f6f34a6acca61c738c4efcbc9..4794d6b4f4d27a4db7f637a309897d64f1ad9e9c 100644 (file)
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
        nmi_ipi_busy_count--;
        nmi_ipi_unlock();
 
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        spin_begin();
        while (1)
                spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)
 
 static void stop_this_cpu(void *dummy)
 {
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        hard_irq_disable();
        spin_begin();
        while (1)
index 07e97f289c5207389ffb817330e5d66a4beb6e70..e2c50b55138f8ab52eecace4c6aad72c382e6bcd 100644 (file)
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
        nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
 }
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
index 083fa06962fda045cb5f00ac0ea5b61046e3d4c4..466216506eb2f4bfa7b6b94ed89140914b1ea682 100644 (file)
@@ -62,9 +62,6 @@ out:
        return ret;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 {
        return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC32
 /*
index 9a5b5a513604018b3ff878b41c2871e135d1e579..32476a6e4e9cea8e8a09bb8da4b80cbd8af5fef8 100644 (file)
@@ -104,39 +104,13 @@ ftrace_regs_call:
        bl      ftrace_stub
        nop
 
-       /* Load the possibly modified NIP */
-       ld      r15, _NIP(r1)
-
+       /* Load ctr with the possibly modified NIP */
+       ld      r3, _NIP(r1)
+       mtctr   r3
 #ifdef CONFIG_LIVEPATCH
-       cmpd    r14, r15        /* has NIP been altered? */
+       cmpd    r14, r3        /* has NIP been altered? */
 #endif
 
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
-       /* NIP has not been altered, skip over further checks */
-       beq     1f
-
-       /* Check if there is an active jprobe on us */
-       subi    r3, r14, 4
-       bl      __is_active_jprobe
-       nop
-
-       /*
-        * If r3 == 1, then this is a kprobe/jprobe.
-        * else, this is livepatched function.
-        *
-        * The conditional branch for livepatch_handler below will use the
-        * result of this comparison. For kprobe/jprobe, we just need to branch to
-        * the new NIP, not call livepatch_handler. The branch below is bne, so we
-        * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
-        * CR0[EQ] = (r3 == 1).
-        */
-       cmpdi   r3, 1
-1:
-#endif
-
-       /* Load CTR with the possibly modified NIP */
-       mtctr   r15
-
        /* Restore gprs */
        REST_GPR(0,r1)
        REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
        addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /*
-        * Based on the cmpd or cmpdi above, if the NIP was altered and we're
-        * not on a kprobe/jprobe, then handle livepatch.
-        */
+        /* Based on the cmpd above, if the NIP was altered, handle livepatch */
        bne-    livepatch_handler
 #endif
 
index d066e37551ec861c1d71a8a958784fd792e2dae6..8c456fa691a586d95127ad8cc54214cf3daf5ce4 100644 (file)
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;
 
-       if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+       if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_HARDWARE;
 
        if (mm_iommu_mapped_inc(mem))
index 925fc316a104cc1ce33b6630cf9aaa46ffde7c0c..5b298f5a1a14ee65ed0be2ad3a85c692ef9b5e8a 100644 (file)
@@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
        if (!mem)
                return H_TOO_HARD;
 
-       if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+       if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+                       &hpa)))
                return H_HARDWARE;
 
        pua = (void *) vmalloc_to_phys(pua);
@@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
-                       prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+                       prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+                                       IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }
 
        if (!prereg) {
index de686b340f4aa4ccccaf47e3349eba94d6fddda2..ee4a8854985e2bba1e83b79fba0133d49176f4ea 100644 (file)
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
        wqp = kvm_arch_vcpu_wq(vcpu);
        if (swq_has_sleeper(wqp)) {
-               swake_up(wqp);
+               swake_up_one(wqp);
                ++vcpu->stat.halt_wakeup;
        }
 
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                }
        }
 
-       prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+       prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 
        if (kvmppc_vcore_check_block(vc)) {
                finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                        kvmppc_start_thread(vcpu, vc);
                        trace_kvm_guest_enter(vcpu);
                } else if (vc->vcore_state == VCORE_SLEEPING) {
-                       swake_up(&vc->wq);
+                       swake_up_one(&vc->wq);
                }
 
        }
index 7c5f479c5c00fb0f562801285e3795400edab084..8a9a49c138652ba2b971a265db233988e01aa7b1 100644 (file)
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
-               pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+               pgtable_free_tlb(tlb, hugepte,
+                                get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
index abb43646927aa9575c3aeec91ae55905545b6026..a4ca576125580d5449d7fadb87decb06d03c41e9 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
+       unsigned int pageshift;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
+       unsigned int pageshift;
+       unsigned long flags;
        struct page *page = NULL;
 
        mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                goto unlock_exit;
        }
 
+       /*
+        * As a starting point for the maximum page size calculation,
+        * we use the natural alignment of @ua and @entries to allow IOMMU pages
+        * smaller than huge pages but still bigger than PAGE_SIZE.
+        */
+       mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
        mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
        if (!mem->hpas) {
                kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                        }
                }
 populate:
+               pageshift = PAGE_SHIFT;
+               if (PageCompound(page)) {
+                       pte_t *pte;
+                       struct page *head = compound_head(page);
+                       unsigned int compshift = compound_order(head);
+
+                       local_irq_save(flags); /* saves and disables interrupts */
+                       pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+                       local_irq_restore(flags);
+
+                       /* Double check it is still the same pinned page */
+                       if (pte && pte_page(*pte) == head &&
+                                       pageshift == compshift)
+                               pageshift = max_t(unsigned int, pageshift,
+                                               PAGE_SHIFT);
+               }
+               mem->pageshift = min(mem->pageshift, pageshift);
                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }
 
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa)
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
        if (entry >= mem->entries)
                return -EFAULT;
 
+       if (pageshift > mem->pageshift)
+               return -EFAULT;
+
        *hpa = *va | (ua & ~PAGE_MASK);
 
        return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa)
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
        if (entry >= mem->entries)
                return -EFAULT;
 
+       if (pageshift > mem->pageshift)
+               return -EFAULT;
+
        pa = (void *) vmalloc_to_phys(va);
        if (!pa)
                return -EFAULT;
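
A worked example of the alignment bound mm_iommu_get() now computes: the lowest set bit of ua | (entries << PAGE_SHIFT) is the largest power-of-two alignment shared by the start address and the region size, and therefore an upper bound on the usable IOMMU page shift. Assumes 64K base pages; __builtin_ctzll() stands in for the kernel's __ffs().

#include <stdio.h>

#define PAGE_SHIFT 16

int main(void)
{
        unsigned long long ua = 0x10000000ULL;  /* 256MB-aligned start */
        unsigned long long entries = 4096;      /* 4096 * 64K = 256MB  */

        /* lowest set bit of the combined value, as __ffs() computes */
        int pageshift = __builtin_ctzll(ua | (entries << PAGE_SHIFT));
        printf("max pageshift = %d\n", pageshift);      /* 28: up to 256MB pages */
        return 0;
}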
index c1f4ca45c93a488df07d66525f0d935ca342f84c..4afbfbb64bfd0a21254a177f4fa3df3c37bff6ea 100644 (file)
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+               /* 16M hugepd directory at pud level */
+       case HTLB_16M_INDEX:
+               BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+               break;
+               /* 16G hugepd directory at the pgd level */
+       case HTLB_16G_INDEX:
+               BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+               break;
+#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
index 75cb646a79c383bc39c578a49ddf48a23ee9c44b..9d16ee251fc0131118c375282b2c3e103a2e0b0f 100644 (file)
@@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
  * in a 2-bit field won't allow writes to a page that is otherwise
  * write-protected.
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
 {
@@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
        up_write(&mm->mmap_sem);
        return err;
 }
-#pragma GCC diagnostic pop
index 67a6e86d3e7efb25e170af7218453230703aa4a5..1135b43a597c5045be9a0425b67a5e5edd17d876 100644 (file)
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                    unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+                                       unsigned long start, unsigned long end,
+                                       bool flush_all_sizes)
 
 {
-       struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;
 
-#ifdef CONFIG_HUGETLB_PAGE
-       if (is_vm_hugetlb_page(vma))
-               return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;
@@ -738,37 +733,64 @@ is_local:
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
                }
        } else {
-               bool hflush = false;
+               bool hflush = flush_all_sizes;
+               bool gflush = flush_all_sizes;
                unsigned long hstart, hend;
+               unsigned long gstart, gend;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
-               hend = end >> HPAGE_PMD_SHIFT;
-               if (hstart < hend) {
-                       hstart <<= HPAGE_PMD_SHIFT;
-                       hend <<= HPAGE_PMD_SHIFT;
+               if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        hflush = true;
+
+               if (hflush) {
+                       hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+                       hend = end & PMD_MASK;
+                       if (hstart == hend)
+                               hflush = false;
+               }
+
+               if (gflush) {
+                       gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+                       gend = end & PUD_MASK;
+                       if (gstart == gend)
+                               gflush = false;
                }
-#endif
 
                asm volatile("ptesync": : :"memory");
                if (local) {
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbiel_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbie_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        fixup_tlbie();
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
 }
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       if (is_vm_hugetlb_page(vma))
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+       __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
 static int radix_get_mmu_psize(int page_size)
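
A standalone sketch of the hstart/hend computation above: round the range start up and the end down to PMD_SIZE, so the 2M-page TLBIs only cover fully contained 2M entries and are skipped when nothing whole remains. Constants and addresses are assumed; the PUD/1G (gstart/gend) case is analogous.

#include <stdio.h>

#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long start = 0x00300000, end = 0x00a00000;
        unsigned long hstart = (start + PMD_SIZE - 1) & PMD_MASK;
        unsigned long hend = end & PMD_MASK;

        if (hstart == hend)
                printf("no whole 2M pages in range\n");
        else
                printf("2M flush covers 0x%lx-0x%lx\n", hstart, hend);
        return 0;
}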
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;
+       unsigned long start = tlb->start;
+       unsigned long end = tlb->end;
 
        /*
         * if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
         */
        if (tlb->fullmm) {
                __flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+       } else if (mm_tlb_flush_nested(mm)) {
+               /*
+                * If there is a concurrent invalidation that is clearing ptes,
+                * then it's possible this invalidation will miss one of those
+                * cleared ptes and miss flushing the TLB. If this invalidate
+                * returns before the other one flushes TLBs, that can result
+                * in it returning while there are still valid TLBs inside the
+                * range to be invalidated.
+                *
+                * See mm/memory.c:tlb_finish_mmu() for more details.
+                *
+                * The solution to this is to ensure the entire range is always
+                * flushed here. The problem for powerpc is that the flushes
+                * are page size specific, so this "forced flush" would not
+                * do the right thing if there are a mix of page sizes in
+                * the range to be invalidated. So use __flush_tlb_range
+                * which invalidates all possible page sizes in the range.
+                *
+                * A PWC flush is probably not required because the core code
+                * shouldn't free page tables in this path, but accounting
+                * for the possibility makes us a bit more robust.
+                *
+                * need_flush_all is an uncommon case because page table
+                * teardown should be done with exclusive locks held (but
+                * after locks are dropped another invalidate could come
+                * in); it could be optimized further if necessary.
+                */
+               if (!tlb->need_flush_all)
+                       __radix__flush_tlb_range(mm, start, end, true);
+               else
+                       radix__flush_all_mm(mm);
+#endif
        } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
-               unsigned long start = tlb->start;
-               unsigned long end = tlb->end;
-
                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
+                       if (!cpu_possible(sib))
+                               continue;
                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
                                flush = true;
                }
index 380cbf9a40d98f76718f22d6e6b5ffa6c1cd25bc..c0a9bcd28356dfcacacb6bae4db7b2f8d18b85f0 100644 (file)
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                u64 imm64;
                u8 *func;
                u32 true_cond;
+               u32 tmp_idx;
 
                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
                case BPF_STX | BPF_XADD | BPF_W:
                        /* Get EA into TMP_REG_1 */
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not word-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
+                       tmp_idx = ctx->idx * 4;
                        /* load value from memory into TMP_REG_2 */
                        PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        /* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
                        /* store result back */
                        PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        /* we're done if this succeeded */
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-                       /* otherwise, let's try once more */
-                       PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       /* exit if the store was not successful */
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
                /* *(u64 *)(dst + off) += src */
                case BPF_STX | BPF_XADD | BPF_DW:
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not doubleword-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
-                       PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+                       tmp_idx = ctx->idx * 4;
                        PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
 
                /*
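
A sketch of the retry loop the JIT now emits for BPF_XADD: record the offset of the load-reserve instruction (tmp_idx) and, when the store-conditional fails, branch back to it instead of unrolling a second attempt and bailing out. Modeled below with a C11 compare-exchange loop; this is not the emitted powerpc lwarx/stwcx. sequence.

#include <stdatomic.h>
#include <stdio.h>

static void xadd32(_Atomic unsigned int *ea, unsigned int src)
{
        unsigned int old = atomic_load(ea);

        /* a failed stwcx. corresponds to the CAS returning false:
         * 'old' is reloaded and we retry from tmp_idx */
        while (!atomic_compare_exchange_weak(ea, &old, old + src))
                ;
}

int main(void)
{
        _Atomic unsigned int v = 40;
        xadd32(&v, 2);
        printf("%u\n", atomic_load(&v));        /* 42 */
        return 0;
}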
index 3f66fcf8ad99ba06a45af3ff7f9f74c254fe3acd..19d8ab49d1bd706e325a9547226b198a7e778892 100644 (file)
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
 }
 
 /*
- * Add a event to the PMU.
+ * Add an event to the PMU.
  * If all events are not already frozen, then we disable and
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
 }
 
 /*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
  */
 static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
  * that a limited PMC can count, doesn't require interrupts, and
  * doesn't exclude any processor mode.
  */
index 7c968e46736faa598861259d5a3781577256a7ff..12e6e4d3060236a05fcd987690b385576d8821cb 100644 (file)
 #define DBG(x...)
 #endif
 
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 #define RTC_OFFSET     2082844800
 
 /*
@@ -97,8 +101,11 @@ static time64_t cuda_get_time(void)
        if (req.reply_len != 7)
                printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
                       req.reply_len);
-       now = (req.reply[3] << 24) + (req.reply[4] << 16)
-               + (req.reply[5] << 8) + req.reply[6];
+       now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
+                   (req.reply[5] << 8) + req.reply[6]);
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -106,10 +113,10 @@ static time64_t cuda_get_time(void)
 
 static int cuda_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         nowtime >> 24, nowtime >> 16, nowtime >> 8,
                         nowtime) < 0)
@@ -140,8 +147,12 @@ static time64_t pmu_get_time(void)
        if (req.reply_len != 4)
                printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
                       req.reply_len);
-       now = (req.reply[0] << 24) + (req.reply[1] << 16)
-               + (req.reply[2] << 8) + req.reply[3];
+       now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
+                   (req.reply[2] << 8) + req.reply[3]);
+
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -149,10 +160,10 @@ static time64_t pmu_get_time(void)
 
 static int pmu_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
                        nowtime >> 16, nowtime >> 8, nowtime) < 0)
                return -ENXIO;
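
A worked check of the RTC_OFFSET constant used above: the Mac epoch (1904-01-01) precedes the Unix epoch (1970-01-01) by 66 years, 17 of them leap years (1904 through 1968).

#include <stdio.h>

int main(void)
{
        long long days = 66LL * 365 + 17;       /* 17 leap days in 1904..1968 */
        printf("%lld\n", days * 86400);         /* 2082844800 = RTC_OFFSET */
        return 0;
}

A 32-bit seconds counter starting at 1904 wraps after about 136 years, which is where the 2040 caveat in the comment comes from.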
index 5bd0eb6681bcbc12c58a21c257cf7202a2d4c5a6..70b2e1e0f23c2c58432152267ff8f23d31772c0b 100644 (file)
@@ -46,6 +46,7 @@
 
 #include "powernv.h"
 #include "pci.h"
+#include "../../../../drivers/pci/pci.h"
 
 #define PNV_IODA1_M64_NUM      16      /* Number of M64 BARs   */
 #define PNV_IODA1_M64_SEGS     8       /* Segments per M64 BAR */
@@ -3138,7 +3139,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
        struct pci_dn *pdn;
        int mul, total_vfs;
 
-       if (!pdev->is_physfn || pdev->is_added)
+       if (!pdev->is_physfn || pci_dev_is_added(pdev))
                return;
 
        pdn = pci_get_pdn(pdev);
index 139f0af6c3d9126f6c766f0958429fba721a018e..8a4868a3964bae4143a79ecfd8db5780f1ee7f19 100644 (file)
@@ -71,6 +71,7 @@
 #include <asm/security_features.h>
 
 #include "pseries.h"
+#include "../../../../drivers/pci/pci.h"
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
@@ -664,7 +665,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
        const int *indexes;
        struct device_node *dn = pci_device_to_OF_node(pdev);
 
-       if (!pdev->is_physfn || pdev->is_added)
+       if (!pdev->is_physfn || pci_dev_is_added(pdev))
                return;
        /* Firmware must support open SR-IOV, otherwise don't configure */
        indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
index 47166ad2a669186c98e4ddb656a1edf5665e66ef..196978733e6407d05b0b2c97f7d4d980a2ce9f05 100644 (file)
@@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 {
        int nr, dotted;
        unsigned long first_adr;
-       unsigned long inst, last_inst = 0;
+       unsigned int inst, last_inst = 0;
        unsigned char val[4];
 
        dotted = 0;
@@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
                dotted = 0;
                last_inst = inst;
                if (praddr)
-                       printf(REG"  %.8lx", adr, inst);
+                       printf(REG"  %.8x", adr, inst);
                printf("\t");
                dump_func(inst, adr);
                printf("\n");
index f12680c9b9475e2b130da3369644e797575f7a80..4764fdeb4f1f6837c771e42f9e84ca5bd8291af6 100644 (file)
@@ -107,6 +107,7 @@ config ARCH_RV32I
        select GENERIC_LIB_ASHLDI3
        select GENERIC_LIB_ASHRDI3
        select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_UCMPDI2
 
 config ARCH_RV64I
        bool "RV64I"
index 855115ace98c8cdccb7083ca4aacb3a2b20e3259..c452359c9cb8aec089438cd5ee23f1fac9150a77 100644 (file)
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define __atomic_op_acquire(op, args...)                               \
-({                                                                     \
-       typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);         \
-       __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");    \
-       __ret;                                                          \
-})
-
-#define __atomic_op_release(op, args...)                               \
-({                                                                     \
-       __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");    \
-       op##_relaxed(args);                                             \
-})
+#define __atomic_acquire_fence()                                       \
+       __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence()                                       \
+       __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
 
 static __always_inline int atomic_read(const atomic_t *v)
 {
@@ -209,130 +202,8 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
-/*
- * The extra atomic operations that are constructed from one of the core
- * AMO-based operations above (aside from sub, which is easier to fit above).
- * These are required to perform a full barrier, but they're OK this way
- * because atomic_*_return is also required to perform a full barrier.
- *
- */
-#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix)             \
-static __always_inline                                                 \
-bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)            \
-{                                                                      \
-       return atomic##prefix##_##func_op##_return(i, v) comp_op I;     \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, func_op, comp_op, I)                            \
-        ATOMIC_OP(op, func_op, comp_op, I,  int,   )
-#else
-#define ATOMIC_OPS(op, func_op, comp_op, I)                            \
-        ATOMIC_OP(op, func_op, comp_op, I,  int,   )                   \
-        ATOMIC_OP(op, func_op, comp_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(add_and_test, add, ==, 0)
-ATOMIC_OPS(sub_and_test, sub, ==, 0)
-ATOMIC_OPS(add_negative, add,  <, 0)
-
-#undef ATOMIC_OP
-#undef ATOMIC_OPS
-
-#define ATOMIC_OP(op, func_op, I, c_type, prefix)                      \
-static __always_inline                                                 \
-void atomic##prefix##_##op(atomic##prefix##_t *v)                      \
-{                                                                      \
-       atomic##prefix##_##func_op(I, v);                               \
-}
-
-#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)                        \
-static __always_inline                                                 \
-c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v)    \
-{                                                                      \
-       return atomic##prefix##_fetch_##func_op##_relaxed(I, v);        \
-}                                                                      \
-static __always_inline                                                 \
-c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)              \
-{                                                                      \
-       return atomic##prefix##_fetch_##func_op(I, v);                  \
-}
-
-#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)          \
-static __always_inline                                                 \
-c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v)   \
-{                                                                      \
-        return atomic##prefix##_fetch_##op##_relaxed(v) c_op I;                \
-}                                                                      \
-static __always_inline                                                 \
-c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)           \
-{                                                                      \
-        return atomic##prefix##_fetch_##op(v) c_op I;                  \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)                                        \
-        ATOMIC_OP(       op, asm_op,       I,  int,   )                        \
-        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )                        \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
-#else
-#define ATOMIC_OPS(op, asm_op, c_op, I)                                        \
-        ATOMIC_OP(       op, asm_op,       I,  int,   )                        \
-        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )                        \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )                        \
-        ATOMIC_OP(       op, asm_op,       I, long, 64)                        \
-        ATOMIC_FETCH_OP( op, asm_op,       I, long, 64)                        \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(inc, add, +,  1)
-ATOMIC_OPS(dec, add, +, -1)
-
-#define atomic_inc_return_relaxed      atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed      atomic_dec_return_relaxed
-#define atomic_inc_return              atomic_inc_return
-#define atomic_dec_return              atomic_dec_return
-
-#define atomic_fetch_inc_relaxed       atomic_fetch_inc_relaxed
-#define atomic_fetch_dec_relaxed       atomic_fetch_dec_relaxed
-#define atomic_fetch_inc               atomic_fetch_inc
-#define atomic_fetch_dec               atomic_fetch_dec
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_inc_return_relaxed    atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed    atomic64_dec_return_relaxed
-#define atomic64_inc_return            atomic64_inc_return
-#define atomic64_dec_return            atomic64_dec_return
-
-#define atomic64_fetch_inc_relaxed     atomic64_fetch_inc_relaxed
-#define atomic64_fetch_dec_relaxed     atomic64_fetch_dec_relaxed
-#define atomic64_fetch_inc             atomic64_fetch_inc
-#define atomic64_fetch_dec             atomic64_fetch_dec
-#endif
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-
-#define ATOMIC_OP(op, func_op, comp_op, I, prefix)                     \
-static __always_inline                                                 \
-bool atomic##prefix##_##op(atomic##prefix##_t *v)                      \
-{                                                                      \
-       return atomic##prefix##_##func_op##_return(v) comp_op I;        \
-}
-
-ATOMIC_OP(inc_and_test, inc, ==, 0,   )
-ATOMIC_OP(dec_and_test, dec, ==, 0,   )
-#ifndef CONFIG_GENERIC_ATOMIC64
-ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
-ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
-#endif
-
-#undef ATOMIC_OP
-
 /* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int prev, rc;
 
@@ -349,9 +220,10 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
                : "memory");
        return prev;
 }
+#define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
        long prev, rc;
 
@@ -368,27 +240,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
                : "memory");
        return prev;
 }
-
-static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       return __atomic64_add_unless(v, a, u) != u;
-}
-#endif
-
-/*
- * The extra atomic operations that are constructed from one of the core
- * LR/SC-based operations above.
- */
-static __always_inline int atomic_inc_not_zero(atomic_t *v)
-{
-        return __atomic_add_unless(v, 1, 0);
-}
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
-{
-        return atomic64_add_unless(v, 1, 0);
-}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 #endif
 
 /*
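The derived helpers removed above are not lost: once an architecture provides atomic_fetch_add_unless() with full-barrier semantics, the generic atomic layer can reconstruct them. A minimal sketch of that derivation (not the kernel's exact fallback text):

static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
	/* true iff the add happened, i.e. *v was not already u */
	return atomic_fetch_add_unless(v, a, u) != u;
}

static inline bool atomic_inc_not_zero(atomic_t *v)
{
	/* increment unless the counter is zero; true on success */
	return atomic_add_unless(v, 1, 0);
}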
index 5cae4c30cd8e2e2147b59285f9eccaaae7e9a789..1e0dfc36aab9e597aaf0d0fb3c99b2ac3dcae750 100644 (file)
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
 
 typedef union __riscv_fp_state elf_fpregset_t;
 
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info)                ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info)                ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF32_R_TYPE(r_info)
+#endif
 
 /*
  * RISC-V relocation types
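The open-coded shift/mask above matched only the 64-bit r_info layout; delegating to the ELF64/ELF32 macros selects the correct packing per xlen. For reference, the two layouts differ as follows (a sketch mirroring the generic ELF definitions; the EXAMPLE_ prefix is ours):

#define EXAMPLE_ELF64_R_SYM(i)	((i) >> 32)		/* symbol in high 32 bits */
#define EXAMPLE_ELF64_R_TYPE(i)	((i) & 0xffffffff)	/* type in low 32 bits */
#define EXAMPLE_ELF32_R_SYM(i)	((i) >> 8)		/* symbol in high 24 bits */
#define EXAMPLE_ELF32_R_TYPE(i)	((unsigned char)(i))	/* type in low 8 bits */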
index b74cbfbce2d0dd9df65ba779ab9dd8feb0d38eed..7bcdaed15703be6d8f141a8582abd024dc966fec 100644 (file)
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
 void __init init_IRQ(void)
 {
        irqchip_init();
index 1d5e9b934b8ca5b5b78a64af5e1c06e334b7e5f2..3303ed2cd4193f82c51730a992d6c875b361ff80 100644 (file)
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
                                     Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
                                  Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
 static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        /* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_rela(struct module *me, u32 *location,
                                   Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
 static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location += (*(u32 *)v);
+       *(u32 *)location += (u32)v;
        return 0;
 }
 
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location -= (*(u32 *)v);
+       *(u32 *)location -= (u32)v;
        return 0;
 }
 
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        unsigned int j;
 
                        for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
-                               u64 hi20_loc =
+                               unsigned long hi20_loc =
                                        sechdrs[sechdrs[relsec].sh_info].sh_addr
                                        + rel[j].r_offset;
                                u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                        Elf_Sym *hi20_sym =
                                                (Elf_Sym *)sechdrs[symindex].sh_addr
                                                + ELF_RISCV_R_SYM(rel[j].r_info);
-                                       u64 hi20_sym_val =
+                                       unsigned long hi20_sym_val =
                                                hi20_sym->st_value
                                                + rel[j].r_addend;
 
                                        /* Calculate lo12 */
-                                       u64 offset = hi20_sym_val - hi20_loc;
+                                       size_t offset = hi20_sym_val - hi20_loc;
                                        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
                                            && hi20_type == R_RISCV_GOT_HI20) {
                                                offset = module_emit_got_entry(
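Every handler above derives a PC-relative offset, and switching the local from s64 to ptrdiff_t keeps the arithmetic at native pointer width on both rv32 and rv64. The hi20/lo12 split that the PCREL and CALL handlers build on rounds the upper part so the sign of the 12-bit low immediate is absorbed; a self-contained sketch:

#include <stdint.h>

/* Split a PC-relative offset for an auipc/addi (or jalr) pair: hi20 is
 * rounded up by 0x800 so that lo12, sign-extended, recombines exactly. */
static void split_pcrel(int32_t offset, uint32_t *hi20, int32_t *lo12)
{
	*hi20 = ((uint32_t)offset + 0x800u) & 0xfffff000u;
	*lo12 = offset - (int32_t)*hi20;	/* always in [-2048, 2047] */
}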
index ba3e80712797c8ece03b07930f2ffb4b370588cc..9f82a7e34c648a370ec42f2e0bad711058e9baf2 100644 (file)
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
        struct pt_regs *regs;
 
        regs = task_pt_regs(target);
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
        return ret;
 }
 
index ee44a48faf79dfd8e01cc07db341cacbeb9feed6..f0d2070866d49b170da74ae0e20e779f55a02199 100644 (file)
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
        riscv_fill_hwcap();
 }
 
-static int __init riscv_device_init(void)
-{
-       return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
index c77df8142be2eaa9525130b3cbea70e191a20aba..58a522f9bcc319ae5d40a8ae15da5d9021921ebd 100644 (file)
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
+#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
        free_area_init_nodes(max_zone_pfns);
index baed39772c845d74d91c292aba3b3ea4063aa130..515240576930731b497a70ed94be5c3ed2aff485 100644 (file)
@@ -106,7 +106,6 @@ config S390
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
@@ -140,12 +139,13 @@ config S390
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FUTEX_CMPXCHG if FUTEX
-       select HAVE_GCC_PLUGINS
+       select HAVE_GCC_PLUGINS if BROKEN
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
+       select HAVE_KERNEL_UNCOMPRESSED
        select HAVE_KERNEL_XZ
        select HAVE_KPROBES
        select HAVE_KRETPROBES
@@ -160,6 +160,7 @@ config S390
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
        select MODULES_USE_ELF_RELA
index 68a690442be052cd4eeaf565442c2b67e9367f74..eee6703093c32e884f00bd07554511a8ccb090f6 100644 (file)
@@ -14,8 +14,18 @@ LD_BFD               := elf64-s390
 LDFLAGS                := -m elf64_s390
 KBUILD_AFLAGS_MODULE += -fPIC
 KBUILD_CFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS  += -m64
 KBUILD_AFLAGS  += -m64
+KBUILD_CFLAGS  += -m64
+aflags_dwarf   := -Wa,-gdwarf-2
+KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
 UTS_MACHINE    := s390x
 STACK_SIZE     := 16384
 CHECKFLAGS     += -D__s390__ -D__s390x__
@@ -52,18 +62,14 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
 #
 cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
 
-# old style option for packed stacks
-ifeq ($(call cc-option-yn,-mkernel-backchain),y)
-cflags-$(CONFIG_PACK_STACK)  += -mkernel-backchain -D__PACK_STACK
-aflags-$(CONFIG_PACK_STACK)  += -D__PACK_STACK
-endif
-
-# new style option for packed stacks
 ifeq ($(call cc-option-yn,-mpacked-stack),y)
 cflags-$(CONFIG_PACK_STACK)  += -mpacked-stack -D__PACK_STACK
 aflags-$(CONFIG_PACK_STACK)  += -D__PACK_STACK
 endif
 
+KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
+KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
+
 ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
 cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
 ifneq ($(call cc-option-yn,-mstack-size=8192),y)
@@ -71,8 +77,11 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ifdef CONFIG_WARN_DYNAMIC_STACK
+  ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+    KBUILD_CFLAGS += -mwarn-dynamicstack
+    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
+  endif
 endif
 
 ifdef CONFIG_EXPOLINE
@@ -82,6 +91,7 @@ ifdef CONFIG_EXPOLINE
     CC_FLAGS_EXPOLINE += -mindirect-branch-table
     export CC_FLAGS_EXPOLINE
     cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+    aflags-y += -DCC_USING_EXPOLINE
   endif
 endif
 
@@ -102,11 +112,12 @@ KBUILD_CFLAGS     += -mbackchain -msoft-float $(cflags-y)
 KBUILD_CFLAGS  += -pipe -fno-strength-reduce -Wno-sign-compare
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables $(cfi)
 KBUILD_AFLAGS  += $(aflags-y) $(cfi)
+export KBUILD_AFLAGS_DECOMPRESSOR
+export KBUILD_CFLAGS_DECOMPRESSOR
 
 OBJCOPYFLAGS   := -O binary
 
-head-y         := arch/s390/kernel/head.o
-head-y         += arch/s390/kernel/head64.o
+head-y         := arch/s390/kernel/head64.o
 
 # See arch/s390/Kbuild for content of core part of the kernel
 core-y         += arch/s390/
@@ -121,7 +132,7 @@ boot                := arch/s390/boot
 syscalls       := arch/s390/kernel/syscalls
 tools          := arch/s390/tools
 
-all: image bzImage
+all: bzImage
 
 #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
 KBUILD_IMAGE   := $(boot)/bzImage
@@ -129,7 +140,7 @@ KBUILD_IMAGE        := $(boot)/bzImage
 install: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@
 
-image bzImage: vmlinux
+bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zfcpdump:
@@ -152,8 +163,7 @@ archprepare:
 
 # Don't use tabs in echo arguments
 define archhelp
-  echo  '* image           - Kernel image for IPL ($(boot)/image)'
-  echo '* bzImage         - Compressed kernel image for IPL ($(boot)/bzImage)'
+  echo '* bzImage         - Kernel image for IPL ($(boot)/bzImage)'
   echo '  install         - Install kernel using'
   echo '                    (your) ~/bin/$(INSTALLKERNEL) or'
   echo '                    (distribution) /sbin/$(INSTALLKERNEL) or'
index ee6a9c387c8796cb8bc035c052d0806f6cec4dae..9bf8489df6e62d00c804189285f71269cb53a634 100644 (file)
@@ -206,35 +206,28 @@ static int
 appldata_timer_handler(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       unsigned int len;
-       char buf[2];
+       int timer_active = appldata_timer_active;
+       int zero = 0;
+       int one = 1;
+       int rc;
+       struct ctl_table ctl_entry = {
+               .procname       = ctl->procname,
+               .data           = &timer_active,
+               .maxlen         = sizeof(int),
+               .extra1         = &zero,
+               .extra2         = &one,
+       };
+
+       rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write)
+               return rc;
 
-       if (!*lenp || *ppos) {
-               *lenp = 0;
-               return 0;
-       }
-       if (!write) {
-               strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
-                       ARRAY_SIZE(buf));
-               len = strnlen(buf, ARRAY_SIZE(buf));
-               if (len > *lenp)
-                       len = *lenp;
-               if (copy_to_user(buffer, buf, len))
-                       return -EFAULT;
-               goto out;
-       }
-       len = *lenp;
-       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
-               return -EFAULT;
        spin_lock(&appldata_timer_lock);
-       if (buf[0] == '1')
+       if (timer_active)
                __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
-       else if (buf[0] == '0')
+       else
                __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
        spin_unlock(&appldata_timer_lock);
-out:
-       *lenp = len;
-       *ppos += len;
        return 0;
 }
 
@@ -248,37 +241,24 @@ static int
 appldata_interval_handler(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       unsigned int len;
-       int interval;
-       char buf[16];
+       int interval = appldata_interval;
+       int one = 1;
+       int rc;
+       struct ctl_table ctl_entry = {
+               .procname       = ctl->procname,
+               .data           = &interval,
+               .maxlen         = sizeof(int),
+               .extra1         = &one,
+       };
 
-       if (!*lenp || *ppos) {
-               *lenp = 0;
-               return 0;
-       }
-       if (!write) {
-               len = sprintf(buf, "%i\n", appldata_interval);
-               if (len > *lenp)
-                       len = *lenp;
-               if (copy_to_user(buffer, buf, len))
-                       return -EFAULT;
-               goto out;
-       }
-       len = *lenp;
-       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
-               return -EFAULT;
-       interval = 0;
-       sscanf(buf, "%i", &interval);
-       if (interval <= 0)
-               return -EINVAL;
+       rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write)
+               return rc;
 
        spin_lock(&appldata_timer_lock);
        appldata_interval = interval;
        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);
-out:
-       *lenp = len;
-       *ppos += len;
        return 0;
 }
 
@@ -293,10 +273,17 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct appldata_ops *ops = NULL, *tmp_ops;
-       unsigned int len;
-       int rc, found;
-       char buf[2];
        struct list_head *lh;
+       int rc, found;
+       int active;
+       int zero = 0;
+       int one = 1;
+       struct ctl_table ctl_entry = {
+               .data           = &active,
+               .maxlen         = sizeof(int),
+               .extra1         = &zero,
+               .extra2         = &one,
+       };
 
        found = 0;
        mutex_lock(&appldata_ops_mutex);
@@ -317,31 +304,15 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
        }
        mutex_unlock(&appldata_ops_mutex);
 
-       if (!*lenp || *ppos) {
-               *lenp = 0;
+       active = ops->active;
+       rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write) {
                module_put(ops->owner);
-               return 0;
-       }
-       if (!write) {
-               strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
-               len = strnlen(buf, ARRAY_SIZE(buf));
-               if (len > *lenp)
-                       len = *lenp;
-               if (copy_to_user(buffer, buf, len)) {
-                       module_put(ops->owner);
-                       return -EFAULT;
-               }
-               goto out;
-       }
-       len = *lenp;
-       if (copy_from_user(buf, buffer,
-                          len > sizeof(buf) ? sizeof(buf) : len)) {
-               module_put(ops->owner);
-               return -EFAULT;
+               return rc;
        }
 
        mutex_lock(&appldata_ops_mutex);
-       if ((buf[0] == '1') && (ops->active == 0)) {
+       if (active && (ops->active == 0)) {
                // protect work queue callback
                if (!try_module_get(ops->owner)) {
                        mutex_unlock(&appldata_ops_mutex);
@@ -359,7 +330,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
                        module_put(ops->owner);
                } else
                        ops->active = 1;
-       } else if ((buf[0] == '0') && (ops->active == 1)) {
+       } else if (!active && (ops->active == 1)) {
                ops->active = 0;
                rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
                                (unsigned long) ops->data, ops->size,
@@ -370,9 +341,6 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
                module_put(ops->owner);
        }
        mutex_unlock(&appldata_ops_mutex);
-out:
-       *lenp = len;
-       *ppos += len;
        module_put(ops->owner);
        return 0;
 }
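All three converted handlers share one pattern: instead of hand-copying and parsing the user buffer, they point a one-off ctl_table at a local int, let proc_dointvec_minmax()/proc_douintvec_minmax() do the copying, parsing and range clamping, and only then apply the value under the appropriate lock. A sketch of the pattern; current_value() and apply_new_value() are hypothetical stand-ins:

#include <linux/sysctl.h>

static int example_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int val = current_value();	/* hypothetical: snapshot state */
	int zero = 0, one = 1, rc;
	struct ctl_table tmp = {
		.procname	= ctl->procname,
		.data		= &val,
		.maxlen		= sizeof(int),
		.extra1		= &zero,	/* clamp writes to 0..1 */
		.extra2		= &one,
	};

	rc = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;		/* error, or read path: done */
	apply_new_value(val);		/* hypothetical: commit under lock */
	return 0;
}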
index d1fa37fcce833887850d5891970e23f0e65375a1..9e6668ee93de83122fbfc1c3ade9ab9b0519d87c 100644 (file)
@@ -3,19 +3,52 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-targets := image
-targets += bzImage
-subdir- := compressed
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
-$(obj)/image: vmlinux FORCE
-       $(call if_changed,objcopy)
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+
+#
+# Use -march=z900 for als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+AFLAGS_REMOVE_head.o           += $(CC_FLAGS_MARCH)
+AFLAGS_head.o                  += -march=z900
+AFLAGS_REMOVE_mem.o            += $(CC_FLAGS_MARCH)
+AFLAGS_mem.o                   += -march=z900
+CFLAGS_REMOVE_als.o            += $(CC_FLAGS_MARCH)
+CFLAGS_als.o                   += -march=z900
+CFLAGS_REMOVE_sclp_early_core.o        += $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o       += -march=z900
+endif
+
+CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
+
+obj-y  := head.o als.o ebcdic.o sclp_early_core.o mem.o
+targets        := bzImage startup.a $(obj-y)
+subdir-        := compressed
+
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
 $(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
 
-$(obj)/compressed/vmlinux: FORCE
+$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
        $(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
+quiet_cmd_ar = AR      $@
+      cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
+
+$(obj)/startup.a: $(OBJECTS) FORCE
+       $(call if_changed,ar)
+
 install: $(CONFIGURE) $(obj)/bzImage
        sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
              System.map "$(INSTALL_PATH)"
+
+chkbss := $(OBJECTS)
+chkbss-target := $(obj)/startup.a
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
similarity index 83%
rename from arch/s390/kernel/als.c
rename to arch/s390/boot/als.c
index d1892bf36cabfa8166aec5c332918ad885f71b35..d592e0d90d9fbbae85e41719674467e725b54f67 100644 (file)
@@ -3,12 +3,10 @@
  *    Copyright IBM Corp. 2016
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
-#include "entry.h"
 
 /*
  * The code within this file will be called very early. It may _not_
@@ -18,9 +16,9 @@
  * For temporary objects the stack (16k) should be used.
  */
 
-static unsigned long als[] __initdata = { FACILITIES_ALS };
+static unsigned long als[] = { FACILITIES_ALS };
 
-static void __init u16_to_hex(char *str, u16 val)
+static void u16_to_hex(char *str, u16 val)
 {
        int i, num;
 
@@ -33,9 +31,9 @@ static void __init u16_to_hex(char *str, u16 val)
        *str = '\0';
 }
 
-static void __init print_machine_type(void)
+static void print_machine_type(void)
 {
-       static char mach_str[80] __initdata = "Detected machine-type number: ";
+       static char mach_str[80] = "Detected machine-type number: ";
        char type_str[5];
        struct cpuid id;
 
@@ -46,7 +44,7 @@ static void __init print_machine_type(void)
        sclp_early_printk(mach_str);
 }
 
-static void __init u16_to_decimal(char *str, u16 val)
+static void u16_to_decimal(char *str, u16 val)
 {
        int div = 1;
 
@@ -60,9 +58,9 @@ static void __init u16_to_decimal(char *str, u16 val)
        *str = '\0';
 }
 
-static void __init print_missing_facilities(void)
+static void print_missing_facilities(void)
 {
-       static char als_str[80] __initdata = "Missing facilities: ";
+       static char als_str[80] = "Missing facilities: ";
        unsigned long val;
        char val_str[6];
        int i, j, first;
@@ -95,7 +93,7 @@ static void __init print_missing_facilities(void)
        sclp_early_printk("See Principles of Operations for facility bits\n");
 }
 
-static void __init facility_mismatch(void)
+static void facility_mismatch(void)
 {
        sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
        print_machine_type();
@@ -103,7 +101,7 @@ static void __init facility_mismatch(void)
        disabled_wait(0x8badcccc);
 }
 
-void __init verify_facilities(void)
+void verify_facilities(void)
 {
        int i;
 
index 2088cc14062911752b034486ce2712bdd930b6b4..45aeb4f087520e643091021ffdcdca3ee8de44a0 100644 (file)
@@ -1,4 +1,5 @@
 sizes.h
 vmlinux
 vmlinux.lds
+vmlinux.scr.lds
 vmlinux.bin.full
index 5766f7b9b2710e11c8676eb9dc13e3b2bd8969be..04609478d18b99303909a01d7b313df8c799873b 100644 (file)
@@ -6,39 +6,29 @@
 #
 
 KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
+obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head.o
-
-KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
-KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
 
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
 
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
-OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
-OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
        $(call if_changed,ld)
 
-TRIM_HEAD_SIZE := 0x11000
-
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - $(TRIM_HEAD_SIZE))/p'
+# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
 
 quiet_cmd_sizes = GEN     $@
       cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
 
-quiet_cmd_trim_head = TRIM    $@
-      cmd_trim_head = tail -c +$$(($(TRIM_HEAD_SIZE) + 1)) $< > $@
-
 $(obj)/sizes.h: vmlinux
        $(call if_changed,sizes)
 
@@ -48,21 +38,18 @@ $(obj)/head.o: $(obj)/sizes.h
 CFLAGS_misc.o += -I$(objtree)/$(obj)
 $(obj)/misc.o: $(obj)/sizes.h
 
-OBJCOPYFLAGS_vmlinux.bin.full :=  -R .comment -S
-$(obj)/vmlinux.bin.full: vmlinux
+OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
        $(call if_changed,objcopy)
 
-$(obj)/vmlinux.bin: $(obj)/vmlinux.bin.full
-       $(call if_changed,trim_head)
-
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
 
-suffix-$(CONFIG_KERNEL_GZIP)  := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZ4)  := lz4
-suffix-$(CONFIG_KERNEL_LZMA)  := lzma
-suffix-$(CONFIG_KERNEL_LZO)  := lzo
-suffix-$(CONFIG_KERNEL_XZ)  := xz
+suffix-$(CONFIG_KERNEL_GZIP)  := .gz
+suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+suffix-$(CONFIG_KERNEL_LZ4)  := .lz4
+suffix-$(CONFIG_KERNEL_LZMA)  := .lzma
+suffix-$(CONFIG_KERNEL_LZO)  := .lzo
+suffix-$(CONFIG_KERNEL_XZ)  := .xz
 
 $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
        $(call if_changed,gzip)
@@ -78,5 +65,9 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
        $(call if_changed,xzkern)
 
 LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
        $(call if_changed,ld)
+
+chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss-target := $(obj)/vmlinux.bin
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
index 9f94eca0f467c8bd6aad88b9140e37b96ecb71a6..df8dbbc17bccdd26a9de71ef251c9662de7dc6bd 100644 (file)
@@ -15,7 +15,7 @@
 #include "sizes.h"
 
 __HEAD
-ENTRY(startup_continue)
+ENTRY(startup_decompressor)
        basr    %r13,0                  # get base
 .LPG1:
        # setup stack
@@ -23,7 +23,7 @@ ENTRY(startup_continue)
        aghi    %r15,-160
        brasl   %r14,decompress_kernel
        # Set up registers for memory mover. We move the decompressed image to
-       # 0x11000, where startup_continue of the decompressed image is supposed
+       # 0x100000, where startup_continue of the decompressed image is supposed
        # to be.
        lgr     %r4,%r2
        lg      %r2,.Loffset-.LPG1(%r13)
@@ -33,7 +33,7 @@ ENTRY(startup_continue)
        la      %r1,0x200
        mvc     0(mover_end-mover,%r1),mover-.LPG1(%r13)
        # When the memory mover is done we pass control to
-       # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+       # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
        # the decompressed image.
        lgr     %r6,%r2
        br      %r1
@@ -47,6 +47,6 @@ mover_end:
 .Lstack:
        .quad   0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 .Loffset:
-       .quad   0x11000
+       .quad   0x100000
 .Lmvsize:
        .quad   SZ__bss_start
index 511b2cc9b91ad4bcac34e0ee7268bd59eb26d472..f66ad73c205b79889554f3ccae735e102df617df 100644 (file)
@@ -71,43 +71,6 @@ static int puts(const char *s)
        return 0;
 }
 
-void *memset(void *s, int c, size_t n)
-{
-       char *xs;
-
-       xs = s;
-       while (n--)
-               *xs++ = c;
-       return s;
-}
-
-void *memcpy(void *dest, const void *src, size_t n)
-{
-       const char *s = src;
-       char *d = dest;
-
-       while (n--)
-               *d++ = *s++;
-       return dest;
-}
-
-void *memmove(void *dest, const void *src, size_t n)
-{
-       const char *s = src;
-       char *d = dest;
-
-       if (d <= s) {
-               while (n--)
-                       *d++ = *s++;
-       } else {
-               d += n;
-               s += n;
-               while (n--)
-                       *--d = *--s;
-       }
-       return dest;
-}
-
 static void error(char *x)
 {
        unsigned long long psw = 0x000a0000deadbeefULL;
index d43c2db12d306bd744eaa78df6405b3e0f905183..b16ac8b3c439390e35fa83ccb8ba17e35d3e2901 100644 (file)
@@ -23,13 +23,10 @@ SECTIONS
                *(.text.*)
                _etext = . ;
        }
-       .rodata.compressed : {
-               *(.rodata.compressed)
-       }
        .rodata : {
                _rodata = . ;
                *(.rodata)       /* read-only data */
-               *(.rodata.*)
+               *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
                _erodata = . ;
        }
        .data : {
@@ -38,6 +35,15 @@ SECTIONS
                *(.data.*)
                _edata = . ;
        }
+       startup_continue = 0x100000;
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+       . = 0x100000;
+#else
+       . = ALIGN(8);
+#endif
+       .rodata.compressed : {
+               *(.rodata.compressed)
+       }
        . = ALIGN(256);
        .bss : {
                _bss = . ;
@@ -54,5 +60,6 @@ SECTIONS
                *(.eh_frame)
                *(__ex_table)
                *(*__ksymtab*)
+               *(___kcrctab*)
        }
 }
similarity index 70%
rename from arch/s390/boot/compressed/vmlinux.scr
rename to arch/s390/boot/compressed/vmlinux.scr.lds.S
index 42a242597f346345c50d9be5422e74db79ad8496..ff01d18c922205d08e9f7afed0b0a66401b431f6 100644 (file)
@@ -2,10 +2,14 @@
 SECTIONS
 {
   .rodata.compressed : {
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
        input_len = .;
        LONG(input_data_end - input_data) input_data = .;
+#endif
        *(.data)
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
        output_len = . - 4;
        input_data_end = .;
+#endif
        }
 }
diff --git a/arch/s390/boot/ebcdic.c b/arch/s390/boot/ebcdic.c
new file mode 100644 (file)
index 0000000..7391e7d
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ebcdic.c"
similarity index 98%
rename from arch/s390/kernel/head.S
rename to arch/s390/boot/head.S
index 5c42f16a54c4e1eece462172af417918dcc0a6ee..f721913b73f10c10c7a89d9a400dacb8ff42902a 100644 (file)
@@ -272,14 +272,14 @@ iplstart:
        .org    0x10000
 ENTRY(startup)
        j       .Lep_startup_normal
-       .org    0x10008
+       .org    EP_OFFSET
 #
 # This is a list of s390 kernel entry points. At address 0x1000f the number of
 # valid entry points is stored.
 #
 # IMPORTANT: Do not change this table, it is s390 kernel ABI!
 #
-       .ascii  "S390EP"
+       .ascii  EP_STRING
        .byte   0x00,0x01
 #
 # kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
@@ -310,10 +310,11 @@ ENTRY(startup_kdump)
        l       %r15,.Lstack-.LPG0(%r13)
        ahi     %r15,-STACK_FRAME_OVERHEAD
        brasl   %r14,verify_facilities
-# For uncompressed images, continue in
-# arch/s390/kernel/head64.S. For compressed images, continue in
-# arch/s390/boot/compressed/head.S.
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
        jg      startup_continue
+#else
+       jg      startup_decompressor
+#endif
 
 .Lstack:
        .long   0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
diff --git a/arch/s390/boot/mem.S b/arch/s390/boot/mem.S
new file mode 100644 (file)
index 0000000..b334636
--- /dev/null
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../lib/mem.S"
diff --git a/arch/s390/boot/sclp_early_core.c b/arch/s390/boot/sclp_early_core.c
new file mode 100644 (file)
index 0000000..5a19fd7
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../drivers/s390/char/sclp_early_core.c"
index a2945b289a2928b5fd94b2ad0a85a93920423ed7..3452e18bb1ca8a241677cab4fd761f13758531dd 100644 (file)
@@ -497,7 +497,7 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
        }
        diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
        rc = hypfs_create_str(cpu_dir, "type", buffer);
-       return PTR_RET(rc);
+       return PTR_ERR_OR_ZERO(rc);
 }
 
 static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
@@ -544,7 +544,7 @@ static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
                return PTR_ERR(rc);
        diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
        rc = hypfs_create_str(cpu_dir, "type", buffer);
-       return PTR_RET(rc);
+       return PTR_ERR_OR_ZERO(rc);
 }
 
 static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
index 06b513d192b9bc5cd3e589a7a220d126744cadf4..c681329fdeec6ba42c95cb321b4ea8ae23b0df06 100644 (file)
@@ -36,7 +36,7 @@ struct hypfs_sb_info {
        kuid_t uid;                     /* uid used for files and dirs */
        kgid_t gid;                     /* gid used for files and dirs */
        struct dentry *update_file;     /* file to trigger update */
-       time_t last_update;             /* last update time in secs since 1970 */
+       time64_t last_update;           /* last update, CLOCK_MONOTONIC time */
        struct mutex lock;              /* lock to protect update process */
 };
 
@@ -52,7 +52,7 @@ static void hypfs_update_update(struct super_block *sb)
        struct hypfs_sb_info *sb_info = sb->s_fs_info;
        struct inode *inode = d_inode(sb_info->update_file);
 
-       sb_info->last_update = get_seconds();
+       sb_info->last_update = ktime_get_seconds();
        inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 }
 
@@ -179,7 +179,7 @@ static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
         *    to restart data collection in this case.
         */
        mutex_lock(&fs_info->lock);
-       if (fs_info->last_update == get_seconds()) {
+       if (fs_info->last_update == ktime_get_seconds()) {
                rc = -EBUSY;
                goto out;
        }
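get_seconds() read the wall clock, which can be stepped backwards by settimeofday() or NTP and would defeat the once-per-second guard above; ktime_get_seconds() returns monotonic seconds as time64_t, so the comparison is immune to clock steps. The debounce in isolation (a sketch):

#include <linux/timekeeping.h>

static time64_t last_update;

/* Allow at most one update per monotonic second. */
static bool update_allowed(void)
{
	time64_t now = ktime_get_seconds();

	if (last_update == now)
		return false;
	last_update = now;
	return true;
}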
index c1bedb4c8de083fa3fa83e874044b12bf4897e8e..046e044a48d096890067560f4c80e0cafa2879dc 100644 (file)
@@ -46,6 +46,50 @@ struct ap_queue_status {
        unsigned int _pad2              : 16;
 };
 
+/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed, otherwise -ENODEV.
+ */
+static inline int ap_instructions_available(void)
+{
+       register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+       register unsigned long reg1 asm ("1") = -ENODEV;
+       register unsigned long reg2 asm ("2");
+
+       asm volatile(
+               "   .long 0xb2af0000\n"         /* PQAP(TAPQ) */
+               "0: la    %0,0\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               : "+d" (reg1), "=d" (reg2)
+               : "d" (reg0)
+               : "cc");
+       return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+       register unsigned long reg0 asm ("0") = qid;
+       register struct ap_queue_status reg1 asm ("1");
+       register unsigned long reg2 asm ("2");
+
+       asm volatile(".long 0xb2af0000"         /* PQAP(TAPQ) */
+                    : "=d" (reg1), "=d" (reg2)
+                    : "d" (reg0)
+                    : "cc");
+       if (info)
+               *info = reg2;
+       return reg1;
+}
+
 /**
  * ap_test_queue(): Test adjunct processor queue.
  * @qid: The AP queue number
@@ -54,10 +98,57 @@ struct ap_queue_status {
  *
  * Returns AP queue status structure.
  */
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
-                                    int tbit,
-                                    unsigned long *info);
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
+                                                  int tbit,
+                                                  unsigned long *info)
+{
+       if (tbit)
+               qid |= 1UL << 23; /* set T bit */
+       return ap_tapq(qid, info);
+}
 
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+       register unsigned long reg0 asm ("0") = qid | (1UL << 24);
+       register struct ap_queue_status reg1 asm ("1");
+
+       asm volatile(
+               ".long 0xb2af0000"              /* PQAP(RAPQ) */
+               : "=d" (reg1)
+               : "d" (reg0)
+               : "cc");
+       return reg1;
+}
+
+/**
+ * ap_zapq(): Reset and zeroize adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+{
+       register unsigned long reg0 asm ("0") = qid | (2UL << 24);
+       register struct ap_queue_status reg1 asm ("1");
+
+       asm volatile(
+               ".long 0xb2af0000"              /* PQAP(ZAPQ) */
+               : "=d" (reg1)
+               : "d" (reg0)
+               : "cc");
+       return reg1;
+}
+
+/**
+ * struct ap_config_info - convenience struct for AP crypto
+ * config info as returned by the ap_qci() function.
+ */
 struct ap_config_info {
        unsigned int apsc        : 1;   /* S bit */
        unsigned int apxa        : 1;   /* N bit */
@@ -74,50 +165,189 @@ struct ap_config_info {
        unsigned char _reserved4[16];
 } __aligned(8);
 
-/*
- * ap_query_configuration(): Fetch cryptographic config info
+/**
+ * ap_qci(): Get AP configuration data
  *
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
+ * Returns 0 on success, or -EOPNOTSUPP.
  */
-int ap_query_configuration(struct ap_config_info *info);
+static inline int ap_qci(struct ap_config_info *config)
+{
+       register unsigned long reg0 asm ("0") = 4UL << 24;
+       register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
+       register struct ap_config_info *reg2 asm ("2") = config;
+
+       asm volatile(
+               ".long 0xb2af0000\n"            /* PQAP(QCI) */
+               "0: la    %0,0\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               : "+d" (reg1)
+               : "d" (reg0), "d" (reg2)
+               : "cc", "memory");
+
+       return reg1;
+}
 
 /*
  * struct ap_qirq_ctrl - convenient struct for easy invocation
- * of the ap_queue_irq_ctrl() function. This struct is passed
- * as GR1 parameter to the PQAP(AQIC) instruction. For details
- * please see the AR documentation.
+ * of the ap_aqic() function. This struct is passed as GR1
+ * parameter to the PQAP(AQIC) instruction. For details please
+ * see the AR documentation.
  */
 struct ap_qirq_ctrl {
        unsigned int _res1 : 8;
-       unsigned int zone  : 8;  /* zone info */
-       unsigned int ir    : 1;  /* ir flag: enable (1) or disable (0) irq */
+       unsigned int zone  : 8; /* zone info */
+       unsigned int ir    : 1; /* ir flag: enable (1) or disable (0) irq */
        unsigned int _res2 : 4;
-       unsigned int gisc  : 3;  /* guest isc field */
+       unsigned int gisc  : 3; /* guest isc field */
        unsigned int _res3 : 6;
-       unsigned int gf    : 2;  /* gisa format */
+       unsigned int gf    : 2; /* gisa format */
        unsigned int _res4 : 1;
-       unsigned int gisa  : 27; /* gisa origin */
+       unsigned int gisa  : 27;        /* gisa origin */
        unsigned int _res5 : 1;
-       unsigned int isc   : 3;  /* irq sub class */
+       unsigned int isc   : 3; /* irq sub class */
 };
 
 /**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
+ * ap_aqic(): Control interruption for a specific AP.
  * @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl, see above
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
  * @ind: The notification indicator byte
  *
  * Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+                                            struct ap_qirq_ctrl qirqctrl,
+                                            void *ind)
+{
+       register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+       register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
+       register struct ap_queue_status reg1_out asm ("1");
+       register void *reg2 asm ("2") = ind;
+
+       asm volatile(
+               ".long 0xb2af0000"              /* PQAP(AQIC) */
+               : "=d" (reg1_out)
+               : "d" (reg0), "d" (reg1_in), "d" (reg2)
+               : "cc");
+       return reg1_out;
+}
+
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_qact() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+       unsigned long val;
+       struct {
+               unsigned int      : 3;
+               unsigned int mode : 3;
+               unsigned int      : 26;
+               unsigned int cat  : 8;
+               unsigned int      : 8;
+               unsigned char ver[2];
+       };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ *         alternate AP queue info provided by the qact function
+ *         in GR2.
  *
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
+ * Returns AP queue status. Check response_code field for failures.
  */
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
-                                        struct ap_qirq_ctrl qirqctrl,
-                                        void *ind);
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+                                            union ap_qact_ap_info *apinfo)
+{
+       register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+               | ((ifbit & 0x01) << 22);
+       register unsigned long reg1_in asm ("1") = apinfo->val;
+       register struct ap_queue_status reg1_out asm ("1");
+       register unsigned long reg2 asm ("2");
+
+       asm volatile(
+               ".long 0xb2af0000"              /* PQAP(QACT) */
+               : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2)
+               : "d" (reg0)
+               : "cc");
+       apinfo->val = reg2;
+       return reg1_out;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+                                            unsigned long long psmid,
+                                            void *msg, size_t length)
+{
+       register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+       register struct ap_queue_status reg1 asm ("1");
+       register unsigned long reg2 asm ("2") = (unsigned long) msg;
+       register unsigned long reg3 asm ("3") = (unsigned long) length;
+       register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+       register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+       asm volatile (
+               "0: .long 0xb2ad0042\n"         /* NQAP */
+               "   brc   2,0b"
+               : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+               : "d" (reg4), "d" (reg5)
+               : "cc", "memory");
+       return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+                                            unsigned long long *psmid,
+                                            void *msg, size_t length)
+{
+       register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+       register struct ap_queue_status reg1 asm ("1");
+       register unsigned long reg2 asm("2") = 0UL;
+       register unsigned long reg4 asm("4") = (unsigned long) msg;
+       register unsigned long reg5 asm("5") = (unsigned long) length;
+       register unsigned long reg6 asm("6") = 0UL;
+       register unsigned long reg7 asm("7") = 0UL;
+
+       asm volatile(
+               "0: .long 0xb2ae0064\n"         /* DQAP */
+               "   brc   6,0b\n"
+               : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+                 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
+               : : "cc", "memory");
+       *psmid = (((unsigned long long) reg6) << 32) + reg7;
+       return reg1;
+}
 
 #endif /* _ASM_S390_AP_H_ */
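With the PQAP wrappers now inline, callers no longer need the ap bus module for the basic instruction sequence. A hypothetical usage sketch; example_probe() is not part of the header, and response_code is the status field the kernel-doc above tells callers to check:

static int example_probe(ap_qid_t qid)
{
	struct ap_queue_status status;
	unsigned long info;

	if (ap_instructions_available() != 0)
		return -ENODEV;		/* PQAP not installed */

	status = ap_tapq(qid, &info);	/* PQAP(TAPQ); GR2 lands in info */
	return status.response_code ? -EIO : 0;
}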
index 4b55532f15c4252e6ea698154f605a0412edd698..fd20ab5d4cf703ace88de31d8ee43363fa54c8ed 100644 (file)
@@ -55,17 +55,9 @@ static inline void atomic_add(int i, atomic_t *v)
        __atomic_add(i, &v->counter);
 }
 
-#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)                 atomic_add(1, _v)
-#define atomic_inc_return(_v)          atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
 #define atomic_sub(_i, _v)             atomic_add(-(int)(_i), _v)
 #define atomic_sub_return(_i, _v)      atomic_add_return(-(int)(_i), _v)
 #define atomic_fetch_sub(_i, _v)       atomic_fetch_add(-(int)(_i), _v)
-#define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)                 atomic_sub(1, _v)
-#define atomic_dec_return(_v)          atomic_sub_return(1, _v)
-#define atomic_dec_and_test(_v)                (atomic_sub_return(1, _v) == 0)
 
 #define ATOMIC_OPS(op)                                                 \
 static inline void atomic_##op(int i, atomic_t *v)                     \
@@ -90,21 +82,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return __atomic_cmpxchg(&v->counter, old, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == u))
-                       break;
-               old = atomic_cmpxchg(v, c, c + a);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
 #define ATOMIC64_INIT(i)  { (i) }
 
 static inline long atomic64_read(const atomic64_t *v)
@@ -168,50 +145,8 @@ ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
 
-static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
-{
-       long c, old;
-
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == u))
-                       break;
-               old = atomic64_cmpxchg(v, c, c + i);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != u;
-}
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long c, old, dec;
-
-       c = atomic64_read(v);
-       for (;;) {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-               old = atomic64_cmpxchg((v), c, dec);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return dec;
-}
-
-#define atomic64_add_negative(_i, _v)  (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)               atomic64_add(1, _v)
-#define atomic64_inc_return(_v)                atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
 #define atomic64_sub_return(_i, _v)    atomic64_add_return(-(long)(_i), _v)
 #define atomic64_fetch_sub(_i, _v)     atomic64_fetch_add(-(long)(_i), _v)
 #define atomic64_sub(_i, _v)           atomic64_add(-(long)(_i), _v)
-#define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)               atomic64_sub(1, _v)
-#define atomic64_dec_return(_v)                atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #endif /* __ARCH_S390_ATOMIC__  */
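As in the RISC-V cleanup earlier in this merge, the deleted cmpxchg loops (__atomic_add_unless, atomic64_add_unless, atomic64_dec_if_positive and the derived inc/dec helpers) are now supplied by the generic fallbacks in <linux/atomic.h>. The generic fetch-add-unless loop is roughly the following sketch, not the kernel's verbatim text:

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	do {
		if (c == u)
			break;		/* don't add; return the old value */
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;
}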
index de023a9a88ca8e35bad7244977af02db2b20c277..bf2cbff926ef8ca60ebccb3c6362ae0323b04503 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CPU-measurement facilities
  *
- *  Copyright IBM Corp. 2012
+ *  Copyright IBM Corp. 2012, 2018
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *            Jan Glauber <jang@linux.vnet.ibm.com>
  */
@@ -139,8 +139,14 @@ struct hws_trailer_entry {
        unsigned char timestamp[16];     /* 16 - 31 timestamp                 */
        unsigned long long reserved1;    /* 32 -Reserved                      */
        unsigned long long reserved2;    /*                                   */
-       unsigned long long progusage1;   /* 48 - reserved for programming use */
-       unsigned long long progusage2;   /*                                   */
+       union {                          /* 48 - reserved for programming use */
+               struct {
+                       unsigned int clock_base:1; /* in progusage2 */
+                       unsigned long long progusage1:63;
+                       unsigned long long progusage2;
+               };
+               unsigned long long progusage[2];
+       };
 } __packed;
 
 /* Load program parameter */
index 0563fd3e84585769f7acd0f093f30a48eaab9f54..480bb02ccacdd07de17ffda81fae140ee748a505 100644 (file)
@@ -6,36 +6,38 @@
 
 struct css_general_char {
        u64 : 12;
-       u32 dynio : 1;   /* bit 12 */
-       u32 : 4;
-       u32 eadm : 1;    /* bit 17 */
-       u32 : 23;
-       u32 aif : 1;     /* bit 41 */
-       u32 : 3;
-       u32 mcss : 1;    /* bit 45 */
-       u32 fcs : 1;     /* bit 46 */
-       u32 : 1;
-       u32 ext_mb : 1;  /* bit 48 */
-       u32 : 7;
-       u32 aif_tdd : 1; /* bit 56 */
-       u32 : 1;
-       u32 qebsm : 1;   /* bit 58 */
-       u32 : 2;
-       u32 aiv : 1;     /* bit 61 */
-       u32 : 5;
-       u32 aif_osa : 1; /* bit 67 */
-       u32 : 12;
-       u32 eadm_rf : 1; /* bit 80 */
-       u32 : 1;
-       u32 cib : 1;     /* bit 82 */
-       u32 : 5;
-       u32 fcx : 1;     /* bit 88 */
-       u32 : 19;
-       u32 alt_ssi : 1; /* bit 108 */
-       u32 : 1;
-       u32 narf : 1;    /* bit 110 */
-       u32 : 12;
-       u32 util_str : 1;/* bit 123 */
+       u64 dynio : 1;   /* bit 12 */
+       u64 : 4;
+       u64 eadm : 1;    /* bit 17 */
+       u64 : 23;
+       u64 aif : 1;     /* bit 41 */
+       u64 : 3;
+       u64 mcss : 1;    /* bit 45 */
+       u64 fcs : 1;     /* bit 46 */
+       u64 : 1;
+       u64 ext_mb : 1;  /* bit 48 */
+       u64 : 7;
+       u64 aif_tdd : 1; /* bit 56 */
+       u64 : 1;
+       u64 qebsm : 1;   /* bit 58 */
+       u64 : 2;
+       u64 aiv : 1;     /* bit 61 */
+       u64 : 2;
+
+       u64 : 3;
+       u64 aif_osa : 1; /* bit 67 */
+       u64 : 12;
+       u64 eadm_rf : 1; /* bit 80 */
+       u64 : 1;
+       u64 cib : 1;     /* bit 82 */
+       u64 : 5;
+       u64 fcx : 1;     /* bit 88 */
+       u64 : 19;
+       u64 alt_ssi : 1; /* bit 108 */
+       u64 : 1;
+       u64 narf : 1;    /* bit 110 */
+       u64 : 12;
+       u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
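The u32 -> u64 switch matters because a bit-field is allocated inside a storage unit of its declared type and may not straddle its boundary: with u32 members, every field past bit 31 was pushed into a later 32-bit word, so the bit-number annotations no longer matched the hardware layout. With u64 members the numbering stays contiguous through bit 63, and the inserted blank line marks where the second 64-bit unit begins. A minimal illustration, assuming the usual s390/SysV bit-field rules:

#include <stdint.h>

struct with_u32 { uint32_t a : 20; uint32_t b : 20; };	/* b starts at bit 32 */
struct with_u64 { uint64_t a : 20; uint64_t b : 20; };	/* b starts at bit 20 */

/* Both structs are 8 bytes, but only the u64 variant packs a and b
 * back to back; the u32 variant leaves 12 padding bits before b. */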
index e07cce88dfb0d9baf83cbc257c1b89a818888b0b..fcbd638fb9f4c353523c7a3f1f7dd1b56ecfd0fe 100644 (file)
@@ -9,6 +9,14 @@
 #ifndef _ASM_S390_GMAP_H
 #define _ASM_S390_GMAP_H
 
+/* Generic bits for GMAP notification on DAT table entry changes. */
+#define GMAP_NOTIFY_SHADOW     0x2
+#define GMAP_NOTIFY_MPROT      0x1
+
+/* Status bits only for huge segment entries */
+#define _SEGMENT_ENTRY_GMAP_IN         0x8000  /* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC         0x4000  /* dirty (migration) */
+
 /**
  * struct gmap_struct - guest address space
  * @list: list head for the mm->context gmap list
@@ -132,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
 int gmap_mprotect_notify(struct gmap *, unsigned long start,
                         unsigned long len, int prot);
 
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+                            unsigned long gaddr, unsigned long vmaddr);
 #endif /* _ASM_S390_GMAP_H */
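The two new status bits are software-defined and must stay clear of bits the hardware interprets in a segment-table entry; the _SEGMENT_ENTRY_HARDWARE_BITS* masks introduced later in this series strip them before an entry reaches hardware. A quick consistency check, assuming the mask value from the pgtable.h hunk below:

    #include <assert.h>

    #define _SEGMENT_ENTRY_GMAP_IN             0x8000
    #define _SEGMENT_ENTRY_GMAP_UC             0x4000
    #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL

    /* Software status bits must vanish under the hardware mask. */
    static_assert((_SEGMENT_ENTRY_GMAP_IN &
                   _SEGMENT_ENTRY_HARDWARE_BITS_LARGE) == 0, "IN is sw-only");
    static_assert((_SEGMENT_ENTRY_GMAP_UC &
                   _SEGMENT_ENTRY_HARDWARE_BITS_LARGE) == 0, "UC is sw-only");
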
index 9c5fc50204dd636cf9483d18f795c616a3b10b53..2d1afa58a4b6bf92c9fab01c021f7923dc199bc3 100644 (file)
@@ -37,7 +37,10 @@ static inline int prepare_hugepage_range(struct file *file,
        return 0;
 }
 
-#define arch_clear_hugepage_flags(page)                do { } while (0)
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+       clear_bit(PG_arch_1, &page->flags);
+}
 
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, unsigned long sz)
index 13de80cf741c09d94a4996a5ca18d883cbe0eba7..b106aa29bf55c461edb59860d84b8d1b4645c7e8 100644 (file)
@@ -68,8 +68,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_saved_imask;
        unsigned long kprobe_saved_ctl[3];
        struct prev_kprobe prev_kprobe;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
index 5bc888841eafe8a478e02d2167cd4744db3626c5..406d940173ab7ad229076e55291e05975b6da0a5 100644 (file)
@@ -185,7 +185,7 @@ struct lowcore {
        /* Transaction abort diagnostic block */
        __u8    pgm_tdb[256];                   /* 0x1800 */
        __u8    pad_0x1900[0x2000-0x1900];      /* 0x1900 */
-} __packed;
+} __packed __aligned(8192);
 
 #define S390_lowcore (*((struct lowcore *) 0))
 
index f5ff9dbad8ac9590a1bbdfc80979bc1c74cf8ede..f31a15044c24a56875661aa6d3e195d75bd9ef9c 100644 (file)
@@ -24,6 +24,8 @@ typedef struct {
        unsigned int uses_skeys:1;
        /* The mmu context uses CMM. */
        unsigned int uses_cmm:1;
+       /* The gmaps associated with this context are allowed to use huge pages. */
+       unsigned int allow_gmap_hpage_1m:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                             \
index d16bc79c30bbfe216b4d7f662e972f60631f055f..0717ee76885d634cfc10dd0ce790004639737dd2 100644 (file)
@@ -32,6 +32,7 @@ static inline int init_new_context(struct task_struct *tsk,
        mm->context.has_pgste = 0;
        mm->context.uses_skeys = 0;
        mm->context.uses_cmm = 0;
+       mm->context.allow_gmap_hpage_1m = 0;
 #endif
        switch (mm->context.asce_limit) {
        case _REGION2_SIZE:
index a01f81186e86aceade03e7df8f8872d307a0d516..123dac3717b33f35a5810b14f06093d15b30b95e 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifdef __ASSEMBLY__
 
-#ifdef CONFIG_EXPOLINE
+#ifdef CC_USING_EXPOLINE
 
 _LC_BR_R1 = __LC_BR_R1
 
@@ -189,7 +189,7 @@ _LC_BR_R1 = __LC_BR_R1
        .macro BASR_EX rsave,rtarget,ruse=%r1
        basr    \rsave,\rtarget
        .endm
-#endif
+#endif /* CC_USING_EXPOLINE */
 
 #endif /* __ASSEMBLY__ */
 
index 94f8db468c9b46c2cae33251e7e73fa49260c4f4..10fe982f2b4bf9ad1479bee371ecb89ddaaaca9c 100644 (file)
@@ -51,6 +51,10 @@ struct zpci_fmb_fmt2 {
        u64 max_work_units;
 };
 
+struct zpci_fmb_fmt3 {
+       u64 tx_bytes;
+};
+
 struct zpci_fmb {
        u32 format      : 8;
        u32 fmt_ind     : 24;
@@ -66,6 +70,7 @@ struct zpci_fmb {
                struct zpci_fmb_fmt0 fmt0;
                struct zpci_fmb_fmt1 fmt1;
                struct zpci_fmb_fmt2 fmt2;
+               struct zpci_fmb_fmt3 fmt3;
        };
 } __packed __aligned(128);
 
index 5ab636089c6052c51cb5ac15046ee0975bcfd026..0e7cb0dc9c33b7f5a8187df912aefb8b15c100fc 100644 (file)
@@ -268,8 +268,10 @@ static inline int is_module_addr(void *addr)
 #define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
 
 /* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS    0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_BITS                    0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE              0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS           0xfffffffffffffe30UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE     0xfffffffffff00730UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address       */
 #define _SEGMENT_ENTRY_ORIGIN  ~0x7ffUL/* page table origin                */
 #define _SEGMENT_ENTRY_PROTECT 0x200   /* segment protection bit           */
@@ -1101,7 +1103,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
                    pte_t *sptep, pte_t *tptep, pte_t pte);
 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
 
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
+                           pte_t *ptep);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq);
 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
@@ -1116,6 +1119,10 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
                        unsigned long *oldpte, unsigned long *oldpgste);
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
 
 /*
  * Certain architectures need to do special things when PTEs
index 6090670df51fcee21b2523fc388ba93ca5510cb0..e297bcfc476f65d0c41e591b9df9f43754b03aa7 100644 (file)
 
 int verify_sha256_digest(void);
 
-extern u64 kernel_entry;
-extern u64 kernel_type;
-
-extern u64 crash_start;
-extern u64 crash_size;
-
 #endif /* __ASSEMBLY__ */
 #endif /* _S390_PURGATORY_H_ */
index de11ecc99c7c46cf967a96c573f1a6c6d2cafc46..9c9970a5dfb10798ddd459dbcff7beae2d9ea42c 100644 (file)
@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
        void *user;
 };
 
-#define QDIO_OUTBUF_STATE_FLAG_NONE    0x00
 #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
 
 #define CHSC_AC1_INITIATE_INPUTQ       0x80
index 54f81f8ed6622de13648f2f680a867acee7d1230..724faede8ac52d565db7b4f4d5dd40391030e9d6 100644 (file)
@@ -4,6 +4,4 @@
 
 #include <asm-generic/sections.h>
 
-extern char _ehead[];
-
 #endif
index 9c30ebe046f3ec5787f33be17e83fdfa646bbe03..1d66016f417020ee99324db641fc813c56b0cfc5 100644 (file)
@@ -9,8 +9,10 @@
 #include <linux/const.h>
 #include <uapi/asm/setup.h>
 
-
+#define EP_OFFSET              0x10008
+#define EP_STRING              "S390EP"
 #define PARMAREA               0x10400
+#define PARMAREA_END           0x11000
 
 /*
  * Machine features detected in early.c
index dc329aa03f7600e7bb763da5f96a4e56fd7a7ea6..83a574e95b3a87554fe88b121152b5f523cb2b03 100644 (file)
@@ -23,29 +23,29 @@ struct chsc_async_header {
        __u32 key : 4;
        __u32 : 28;
        struct subchannel_id sid;
-} __attribute__ ((packed));
+};
 
 struct chsc_async_area {
        struct chsc_async_header header;
        __u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
-} __attribute__ ((packed));
+};
 
 struct chsc_header {
        __u16 length;
        __u16 code;
-} __attribute__ ((packed));
+};
 
 struct chsc_sync_area {
        struct chsc_header header;
        __u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
-} __attribute__ ((packed));
+};
 
 struct chsc_response_struct {
        __u16 length;
        __u16 code;
        __u32 parms;
        __u8 data[CHSC_SIZE - 2 * sizeof(__u16) - sizeof(__u32)];
-} __attribute__ ((packed));
+};
 
 struct chsc_chp_cd {
        struct chp_id chpid;
index 2fed39b26b42e598ac1bdcc8c8a11bcf0dd5279a..dbfd1730e631acfb71d8688ca4383ff98385a106 100644 (file)
@@ -9,38 +9,20 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o         = $(CC_FLAGS_FTRACE)
 
 # Do not trace early setup code
-CFLAGS_REMOVE_als.o            = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_early.o          = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_early_nobss.o    = $(CC_FLAGS_FTRACE)
 
 endif
 
-GCOV_PROFILE_als.o             := n
 GCOV_PROFILE_early.o           := n
 GCOV_PROFILE_early_nobss.o     := n
 
-KCOV_INSTRUMENT_als.o          := n
 KCOV_INSTRUMENT_early.o                := n
 KCOV_INSTRUMENT_early_nobss.o  := n
 
-UBSAN_SANITIZE_als.o           := n
 UBSAN_SANITIZE_early.o         := n
 UBSAN_SANITIZE_early_nobss.o   := n
 
-#
-# Use -march=z900 for als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_als.o    += $(CC_FLAGS_MARCH)
-CFLAGS_REMOVE_als.o    += $(CC_FLAGS_EXPOLINE)
-CFLAGS_als.o           += -march=z900
-AFLAGS_REMOVE_head.o   += $(CC_FLAGS_MARCH)
-AFLAGS_head.o          += -march=z900
-endif
-
-CFLAGS_als.o           += -D__NO_FORTIFY
-
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
@@ -61,13 +43,13 @@ CFLAGS_ptrace.o             += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y  := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y  += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y  += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o
+obj-y  += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
 obj-y  += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y  += nospec-branch.o
 
-extra-y                                += head.o head64.o vmlinux.lds
+extra-y                                += head64.o vmlinux.lds
 
 obj-$(CONFIG_SYSFS)            += nospec-sysfs.o
 CFLAGS_REMOVE_nospec-branch.o  += $(CC_FLAGS_EXPOLINE)
@@ -99,5 +81,5 @@ obj-$(CONFIG_TRACEPOINTS)     += trace.o
 obj-y                          += vdso64/
 obj-$(CONFIG_COMPAT)           += vdso32/
 
-chkbss := head.o head64.o als.o early_nobss.o
+chkbss := head64.o early_nobss.o
 include $(srctree)/arch/s390/scripts/Makefile.chkbss
index 607c5e9fba3ddcdfe762c347c8441c447fc529f5..2ce28bf0c5ec44939d815c5187153b0858475aa7 100644 (file)
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
 COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
 COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
 COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
index 9f5ea9d870690aea40742e4d5b3181bb637623cc..c3620bafc374d38d42a83d84e20fbb11eb6421a1 100644 (file)
@@ -306,6 +306,15 @@ static void *kzalloc_panic(int len)
        return rc;
 }
 
+static const char *nt_name(Elf64_Word type)
+{
+       const char *name = "LINUX";
+
+       if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
+               name = KEXEC_CORE_NOTE_NAME;
+       return name;
+}
+
 /*
  * Initialize ELF note
  */
@@ -332,11 +341,26 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
 
 static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
 {
-       const char *note_name = "LINUX";
+       return nt_init_name(buf, type, desc, d_len, nt_name(type));
+}
+
+/*
+ * Calculate the size of ELF note
+ */
+static size_t nt_size_name(int d_len, const char *name)
+{
+       size_t size;
 
-       if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
-               note_name = KEXEC_CORE_NOTE_NAME;
-       return nt_init_name(buf, type, desc, d_len, note_name);
+       size = sizeof(Elf64_Nhdr);
+       size += roundup(strlen(name) + 1, 4);
+       size += roundup(d_len, 4);
+
+       return size;
+}
+
+static inline size_t nt_size(Elf64_Word type, int d_len)
+{
+       return nt_size_name(d_len, nt_name(type));
 }
 
 /*
@@ -374,6 +398,29 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
        return ptr;
 }
 
+/*
+ * Calculate size of ELF notes per cpu
+ */
+static size_t get_cpu_elf_notes_size(void)
+{
+       struct save_area *sa = NULL;
+       size_t size;
+
+       size =  nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
+       size +=  nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
+       size +=  nt_size(NT_S390_TIMER, sizeof(sa->timer));
+       size +=  nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
+       size +=  nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
+       size +=  nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
+       size +=  nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+       if (MACHINE_HAS_VX) {
+               size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
+               size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+       }
+
+       return size;
+}
+
 /*
  * Initialize prpsinfo note (new kernel)
  */
@@ -429,6 +476,30 @@ static void *nt_vmcoreinfo(void *ptr)
        return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
 }
 
+static size_t nt_vmcoreinfo_size(void)
+{
+       const char *name = "VMCOREINFO";
+       char nt_name[11];
+       Elf64_Nhdr note;
+       void *addr;
+
+       if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+               return 0;
+
+       if (copy_oldmem_kernel(&note, addr, sizeof(note)))
+               return 0;
+
+       memset(nt_name, 0, sizeof(nt_name));
+       if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+                              sizeof(nt_name) - 1))
+               return 0;
+
+       if (strcmp(nt_name, name) != 0)
+               return 0;
+
+       return nt_size_name(note.n_descsz, name);
+}
+
 /*
  * Initialize final note (needed for /proc/vmcore code)
  */
@@ -539,6 +610,27 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
        return ptr;
 }
 
+static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+{
+       size_t size;
+
+       size = sizeof(Elf64_Ehdr);
+       /* PT_NOTES */
+       size += sizeof(Elf64_Phdr);
+       /* nt_prpsinfo */
+       size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+       /* regsets */
+       size += get_cpu_cnt() * get_cpu_elf_notes_size();
+       /* nt_vmcoreinfo */
+       size += nt_vmcoreinfo_size();
+       /* nt_final */
+       size += sizeof(Elf64_Nhdr);
+       /* PT_LOADS */
+       size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+
+       return size;
+}
+
 /*
  * Create ELF core header (new kernel)
  */
@@ -566,8 +658,8 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 
        mem_chunk_cnt = get_mem_chunk_cnt();
 
-       alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
-               mem_chunk_cnt * sizeof(Elf64_Phdr);
+       alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+
        hdr = kzalloc_panic(alloc_size);
        /* Init elf header */
        ptr = ehdr_init(hdr, mem_chunk_cnt);
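The new size helpers replace the old `0x1000 + get_cpu_cnt() * 0x4a0` estimate with an exact per-note sum: each ELF note is a fixed Elf64_Nhdr followed by its name and descriptor, both padded to 4 bytes. A host-side sketch of the arithmetic, re-deriving nt_size_name() from the hunk above:

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    #define ROUNDUP4(x) (((x) + 3UL) & ~3UL)

    static size_t nt_size_name(size_t d_len, const char *name)
    {
            return sizeof(Elf64_Nhdr)            /* 12-byte header */
                   + ROUNDUP4(strlen(name) + 1)  /* padded name */
                   + ROUNDUP4(d_len);            /* padded descriptor */
    }

    int main(void)
    {
            /* "LINUX" (6 incl. NUL -> 8) + 100-byte desc -> 12+8+100 */
            printf("note size: %zu bytes\n", nt_size_name(100, "LINUX"));
            return 0;
    }
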
index 827699eb48fa98c7cbd869a62d1e8e303d293483..5b28b434f8a153d27ca8a7124d156ec05897e71e 100644 (file)
@@ -331,8 +331,20 @@ static void __init setup_boot_command_line(void)
        append_to_cmdline(append_ipl_scpdata);
 }
 
+static void __init check_image_bootable(void)
+{
+       if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
+               return;
+
+       sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
+       sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
+       sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
+       disabled_wait(0xbadb007);
+}
+
 void __init startup_init(void)
 {
+       check_image_bootable();
        time_early_init();
        init_kernel_storage_key();
        lockdep_off();
index f03402efab4b414eefdfd59135f4ee89dda68e8a..150130c897c39938d03d04e497100cca77d0a353 100644 (file)
@@ -357,6 +357,10 @@ ENTRY(system_call)
        stg     %r2,__PT_R2(%r11)               # store return value
 
 .Lsysc_return:
+#ifdef CONFIG_DEBUG_RSEQ
+       lgr     %r2,%r11
+       brasl   %r14,rseq_syscall
+#endif
        LOCKDEP_SYS_EXIT
 .Lsysc_tif:
        TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
@@ -1265,7 +1269,7 @@ cleanup_critical:
        jl      0f
        clg     %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
        jl      .Lcleanup_load_fpu_regs
-0:     BR_EX   %r14
+0:     BR_EX   %r14,%r11
 
        .align  8
 .Lcleanup_table:
@@ -1301,7 +1305,7 @@ cleanup_critical:
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
-       BR_EX   %r14
+       BR_EX   %r14,%r11
 #endif
 
 .Lcleanup_system_call:
index 961abfac2c5fb0b39d6a11a21c1292c6138dbf6c..472fa2f1a4a593f9ac96dfc99089b5bcab732e51 100644 (file)
@@ -83,7 +83,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
 
 DECLARE_PER_CPU(u64, mt_cycles[8]);
 
-void verify_facilities(void);
 void gs_load_bc_cb(struct pt_regs *regs);
 void set_fs_fixup(void);
 
index 791cb9000e8658fdb6b17cce1550e88ddf1055cc..6d14ad42ba883b0e1c7a9a3065633aa2b2d40240 100644 (file)
@@ -48,11 +48,23 @@ ENTRY(startup_continue)
 # Early machine initialization and detection functions.
 #
        brasl   %r14,startup_init
-       lpswe   .Lentry-.LPG1(13)       # jump to _stext in primary-space,
-                                       # virtual and never return ...
+
+# check control registers
+       stctg   %c0,%c15,0(%r15)
+       oi      6(%r15),0x60            # enable sigp emergency & external call
+       oi      4(%r15),0x10            # switch on low address protection
+       lctlg   %c0,%c15,0(%r15)
+
+       lam     0,15,.Laregs-.LPG1(%r13)        # load acrs needed by uaccess
+       brasl   %r14,start_kernel               # go to C code
+#
+# We returned from start_kernel ?!? PANIK
+#
+       basr    %r13,0
+       lpswe   .Ldw-.(%r13)            # load disabled wait psw
+
        .align  16
 .LPG1:
-.Lentry:.quad  0x0000000180000000,_stext
 .Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
        .quad   0                       # cr1: primary space segment table
        .quad   .Lduct                  # cr2: dispatchable unit control table
@@ -85,30 +97,5 @@ ENTRY(startup_continue)
        .endr
 .Llinkage_stack:
        .long   0,0,0x89000000,0,0,0,0x8a000000,0
-
-ENTRY(_ehead)
-
-       .org    0x100000 - 0x11000      # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
-       basr    %r13,0                  # get base
-.LPG3:
-# check control registers
-       stctg   %c0,%c15,0(%r15)
-       oi      6(%r15),0x60            # enable sigp emergency & external call
-       oi      4(%r15),0x10            # switch on low address protection
-       lctlg   %c0,%c15,0(%r15)
-
-       lam     0,15,.Laregs-.LPG3(%r13)        # load acrs needed by uaccess
-       brasl   %r14,start_kernel       # go to C code
-#
-# We returned from start_kernel ?!? PANIK
-#
-       basr    %r13,0
-       lpswe   .Ldw-.(%r13)            # load disabled wait psw
-
-       .align  8
 .Ldw:  .quad   0x0002000180000000,0x0000000000000000
 .Laregs:.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
index 60f60afa645c1eb873f4fa14600f8cf79ef82ae5..7c0a095e9c5f6f3d483ceff698f7d7ffd06e614b 100644 (file)
@@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs)
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry
-                        * for jprobe processing, so get out doing nothing
-                        * more here.
+                        * for changing execution path, so get out doing
+                        * nothing more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-                       if (p->pre_handler && p->pre_handler(p, regs))
+                       if (p->pre_handler && p->pre_handler(p, regs)) {
+                               pop_kprobe(kcb);
+                               preempt_enable_no_resched();
                                return 1;
+                       }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       /*
-                        * Continuation after the jprobe completed and
-                        * caused the jprobe_return trap. The jprobe
-                        * break_handler "returns" to the original
-                        * function that still has the kprobe breakpoint
-                        * installed. We continue with single stepping.
-                        */
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       enable_singlestep(kcb, regs,
-                                         (unsigned long) p->ainsn.insn);
-                       return 1;
-               } /* else:
-                  * No kprobe at this address and the current kprobe
-                  * has no break handler (no jprobe!). The kernel just
-                  * exploded, let the standard trap handler pick up the
-                  * pieces.
-                  */
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
@@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
        regs->psw.addr = orig_ret_address;
 
-       pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
 }
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack;
-
-       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-       /* setup return addr to the jprobe handler routine */
-       regs->psw.addr = (unsigned long) jp->entry;
-       regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-
-       /* r15 is the stack pointer */
-       stack = (unsigned long) regs->gprs[15];
-
-       memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of
-        * the function graph tracer.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
-       asm volatile(".word 0x0002");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack;
-
-       /* It's OK to start function graph tracing again */
-       unpause_graph_tracing();
-
-       stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
-
-       /* Put the regs back */
-       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-       /* put the stack back */
-       memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
-       preempt_enable_no_resched();
-       return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
index 18ae7b9c71d6e3ed6c122d8b6d1bc107dfc4ff2f..bdddaae9655984dfbf59ccee386c7bb3608cb183 100644 (file)
@@ -35,6 +35,8 @@ early_param("nospec", nospec_setup_early);
 
 static int __init nospec_report(void)
 {
+       if (test_facility(156))
+               pr_info("Spectre V2 mitigation: etokens\n");
        if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
                pr_info("Spectre V2 mitigation: execute trampolines\n");
        if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
@@ -56,7 +58,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
 
 void __init nospec_auto_detect(void)
 {
-       if (IS_ENABLED(CC_USING_EXPOLINE)) {
+       if (test_facility(156)) {
+               /*
+                * The machine supports etokens.
+                * Disable expolines and disable nobp.
+                */
+               if (IS_ENABLED(CC_USING_EXPOLINE))
+                       nospec_disable = 1;
+               __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+       } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
                /*
                 * The kernel has been compiled with expolines.
                 * Keep expolines enabled and disable nobp.
index 8affad5f18cb5df637754f192ae5d8bce9387eba..e30e580ae36209d3395c2cf25a412ba5b9385480 100644 (file)
@@ -13,6 +13,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev,
 ssize_t cpu_show_spectre_v2(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
+       if (test_facility(156))
+               return sprintf(buf, "Mitigation: etokens\n");
        if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
                return sprintf(buf, "Mitigation: execute trampolines\n");
        if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
index 0292d68e7dded707496b7090c9b2d7377aab8b2b..cb198d4a6dca7566b0cfba4f34854f9b6074f972 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Performance event support for the System z CPU-measurement Sampling Facility
  *
- * Copyright IBM Corp. 2013
+ * Copyright IBM Corp. 2013, 2018
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
 #define KMSG_COMPONENT "cpum_sf"
@@ -1587,6 +1587,17 @@ static void aux_buffer_free(void *data)
                            "%lu SDBTs\n", num_sdbt);
 }
 
+static void aux_sdb_init(unsigned long sdb)
+{
+       struct hws_trailer_entry *te;
+
+       te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+
+       /* Save clock base */
+       te->clock_base = 1;
+       memcpy(&te->progusage2, &tod_clock_base[1], 8);
+}
+
 /*
  * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
  * @cpu:       On which to allocate, -1 means current
@@ -1666,6 +1677,7 @@ static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
                /* Tail is the entry in a SDBT */
                *tail = (unsigned long)pages[i];
                aux->sdb_index[i] = (unsigned long)pages[i];
+               aux_sdb_init((unsigned long)pages[i]);
        }
        sfb->num_sdb = nr_pages;
 
index 54e2d634b849e128c1ec1d1ccc15c600978ed50b..4352a504f2354fe04a5730736958f24c709c7428 100644 (file)
@@ -12,9 +12,6 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
        freg_t fp;
 
-       if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
-               return 0;
-
        if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
                return regs->gprs[idx];
 
@@ -33,7 +30,8 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
        if (idx == PERF_REG_S390_PC)
                return regs->psw.addr;
 
-       return regs->gprs[idx];
+       WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX);
+       return 0;
 }
 
 #define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
index d82a9ec64ea9712b2f33572b7baffcbe679ff6bd..c637c12f9e37ccef3c0ab9a35bbe312259f75414 100644 (file)
@@ -674,12 +674,12 @@ static void __init reserve_kernel(void)
 #ifdef CONFIG_DMA_API_DEBUG
        /*
         * DMA_API_DEBUG code stumbles over addresses from the
-        * range [_ehead, _stext]. Mark the memory as reserved
+        * range [PARMAREA_END, _stext]. Mark the memory as reserved
         * so it is not used for CONFIG_DMA_API_DEBUG=y.
         */
        memblock_reserve(0, PFN_PHYS(start_pfn));
 #else
-       memblock_reserve(0, (unsigned long)_ehead);
+       memblock_reserve(0, PARMAREA_END);
        memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
                         - (unsigned long)_stext);
 #endif
index 2d2960ab3e108ca5b0d6ef06476987ed8d5f4839..22f08245aa5d46ef5f80398ebcc4e064099c91a6 100644 (file)
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
                }
                /* No longer in a system call */
                clear_pt_regs_flag(regs, PIF_SYSCALL);
-
+               rseq_signal_deliver(&ksig, regs);
                if (is_compat_task())
                        handle_signal32(&ksig, oldset, regs);
                else
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
+       rseq_handle_notify_resume(NULL, regs);
 }
index 8b210ead79569413ab74e3a1c03b506f48a1622f..022fc099b628292e3c9daeecb2eb18ac54816935 100644 (file)
 379  common    statx                   sys_statx                       compat_sys_statx
 380  common    s390_sthyi              sys_s390_sthyi                  compat_sys_s390_sthyi
 381  common    kexec_file_load         sys_kexec_file_load             compat_sys_kexec_file_load
+382  common    io_pgetevents           sys_io_pgetevents               compat_sys_io_pgetevents
+383  common    rseq                    sys_rseq                        compat_sys_rseq
index 54f5496913fa77d8f12b48b1afcb2f9a1784927b..12f80d1f041513d650991690ca99004ee62f9d9a 100644 (file)
@@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
 }
 EXPORT_SYMBOL(stsi);
 
+#ifdef CONFIG_PROC_FS
+
 static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
 {
        switch (encoding) {
@@ -301,6 +303,8 @@ static int __init sysinfo_create_proc(void)
 }
 device_initcall(sysinfo_create_proc);
 
+#endif /* CONFIG_PROC_FS */
+
 /*
  * Service levels interface.
  */
index cf561160ea887f9b6395e1d5ec30f9ee02aba77d..e8766beee5ad86c3a6ed212ef9d1b91e6e4578f4 100644 (file)
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
        ext_to_timespec64(clk, ts);
 }
 
-void read_boot_clock64(struct timespec64 *ts)
+void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+                                                struct timespec64 *boot_offset)
 {
        unsigned char clk[STORE_CLOCK_EXT_SIZE];
+       struct timespec64 boot_time;
        __u64 delta;
 
        delta = initial_leap_seconds + TOD_UNIX_EPOCH;
-       memcpy(clk, tod_clock_base, 16);
-       *(__u64 *) &clk[1] -= delta;
-       if (*(__u64 *) &clk[1] > delta)
+       memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
+       *(__u64 *)&clk[1] -= delta;
+       if (*(__u64 *)&clk[1] > delta)
                clk[0]--;
-       ext_to_timespec64(clk, ts);
+       ext_to_timespec64(clk, &boot_time);
+
+       read_persistent_clock64(wall_time);
+       *boot_offset = timespec64_sub(*wall_time, boot_time);
 }
 
 static u64 read_tod_clock(struct clocksource *cs)
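The switch from read_boot_clock64() to read_persistent_wall_and_boot_offset() hands the timekeeping core the wall time plus the wall-minus-boot difference in one call instead of two separate clock reads. The offset itself is ordinary timespec subtraction with a nanosecond borrow, sketched host-side:

    #include <stdio.h>
    #include <time.h>

    /* boot_offset = wall_time - boot_time, with nanosecond borrow */
    static struct timespec timespec_sub(struct timespec a, struct timespec b)
    {
            struct timespec r = {
                    .tv_sec  = a.tv_sec - b.tv_sec,
                    .tv_nsec = a.tv_nsec - b.tv_nsec,
            };

            if (r.tv_nsec < 0) {
                    r.tv_sec--;
                    r.tv_nsec += 1000000000L;
            }
            return r;
    }

    int main(void)
    {
            struct timespec wall = { 1534200000, 500000000 };
            struct timespec boot = { 1534100000, 900000000 };
            struct timespec off  = timespec_sub(wall, boot);

            printf("boot offset: %lld.%09ld s\n",
                   (long long)off.tv_sec, off.tv_nsec);
            return 0;
    }
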
index 4b6e0397f66d634738af71bc748e0b4a30177f7b..e8184a15578a332eae08bcb60e21fbb3ec0dd780 100644 (file)
@@ -579,41 +579,33 @@ early_param("topology", topology_setup);
 static int topology_ctl_handler(struct ctl_table *ctl, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       unsigned int len;
+       int enabled = topology_is_enabled();
        int new_mode;
-       char buf[2];
+       int zero = 0;
+       int one = 1;
+       int rc;
+       struct ctl_table ctl_entry = {
+               .procname       = ctl->procname,
+               .data           = &enabled,
+               .maxlen         = sizeof(int),
+               .extra1         = &zero,
+               .extra2         = &one,
+       };
+
+       rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write)
+               return rc;
 
-       if (!*lenp || *ppos) {
-               *lenp = 0;
-               return 0;
-       }
-       if (!write) {
-               strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
-                       ARRAY_SIZE(buf));
-               len = strnlen(buf, ARRAY_SIZE(buf));
-               if (len > *lenp)
-                       len = *lenp;
-               if (copy_to_user(buffer, buf, len))
-                       return -EFAULT;
-               goto out;
-       }
-       len = *lenp;
-       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
-               return -EFAULT;
-       if (buf[0] != '0' && buf[0] != '1')
-               return -EINVAL;
        mutex_lock(&smp_cpu_state_mutex);
-       new_mode = topology_get_mode(buf[0] == '1');
+       new_mode = topology_get_mode(enabled);
        if (topology_mode != new_mode) {
                topology_mode = new_mode;
                topology_schedule_update();
        }
        mutex_unlock(&smp_cpu_state_mutex);
        topology_flush_work();
-out:
-       *lenp = len;
-       *ppos += len;
-       return 0;
+
+       return rc;
 }
 
 static struct ctl_table topology_ctl_table[] = {
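The rewritten handler is an instance of a common sysctl pattern: bind a stack-local ctl_table to a local variable and let proc_douintvec_minmax() do all the user-space copying, parsing, formatting and range clamping the old code open-coded. A hedged kernel-style sketch of the pattern (not a standalone program; read_current_state() and apply_new_state() are hypothetical stand-ins):

    static int bool_knob_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            int val = read_current_state();         /* hypothetical accessor */
            int zero = 0, one = 1;
            struct ctl_table tmp = {
                    .procname = ctl->procname,
                    .data     = &val,
                    .maxlen   = sizeof(int),
                    .extra1   = &zero,              /* lower bound */
                    .extra2   = &one,               /* upper bound */
            };
            int rc;

            rc = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);
            if (rc < 0 || !write)
                    return rc;                      /* error, or read path done */
            apply_new_state(val);                   /* hypothetical setter */
            return rc;
    }
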
index 09abae40f9178a9d8789d9ef647fc85b74fcc82b..3031cc6dd0ab48de8ebf3797a2bc748995d67c49 100644 (file)
@@ -47,7 +47,7 @@ static struct page **vdso64_pagelist;
  */
 unsigned int __read_mostly vdso_enabled = 1;
 
-static int vdso_fault(const struct vm_special_mapping *sm,
+static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page **vdso_pagelist;
index f0414f52817b1491ff1437b9ed9a23f798903195..b43f8d33a3697de32e7c9f7dae4cbf2e7cf3bc46 100644 (file)
@@ -19,7 +19,7 @@
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
-ENTRY(startup)
+ENTRY(startup_continue)
 jiffies = jiffies_64;
 
 PHDRS {
@@ -30,16 +30,12 @@ PHDRS {
 
 SECTIONS
 {
-       . = 0x00000000;
+       . = 0x100000;
+       _stext = .;             /* Start of text section */
        .text : {
                /* Text and read-only data */
+               _text = .;
                HEAD_TEXT
-               /*
-                * E.g. perf doesn't like symbols starting at address zero,
-                * therefore skip the initial PSW and channel program located
-                * at address zero and let _text start at 0x200.
-                */
-       _text = 0x200;
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
@@ -47,6 +43,7 @@ SECTIONS
                KPROBES_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
+               *(.text.*_indirect_*)
                *(.fixup)
                *(.gnu.warning)
        } :text = 0x0700
index daa09f89ca2de66af6f1b0fc79da3a71cce4dc27..fcb55b02990ef96e20148472828de2e324c6a56f 100644 (file)
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
                 * yield-candidate.
                 */
                vcpu->preempted = true;
-               swake_up(&vcpu->wq);
+               swake_up_one(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
        /*
index 3b7a5151b6a5effa6dabc5aa95ac027a4af53198..f9d90337e64a6c0d26a102fea2329e74ff917bc6 100644 (file)
@@ -172,6 +172,10 @@ static int nested;
 module_param(nested, int, S_IRUGO);
 MODULE_PARM_DESC(nested, "Nested virtualization support");
 
+/* allow 1m huge page guest backing, if !nested */
+static int hpage;
+module_param(hpage, int, 0444);
+MODULE_PARM_DESC(hpage, "1m huge page backing support");
 
 /*
  * For now we handle at most 16 double words as this is what the s390 base
@@ -475,6 +479,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_AIS_MIGRATION:
                r = 1;
                break;
+       case KVM_CAP_S390_HPAGE_1M:
+               r = 0;
+               if (hpage)
+                       r = 1;
+               break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
@@ -511,19 +520,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 }
 
 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
-                                       struct kvm_memory_slot *memslot)
+                                   struct kvm_memory_slot *memslot)
 {
+       int i;
        gfn_t cur_gfn, last_gfn;
-       unsigned long address;
+       unsigned long gaddr, vmaddr;
        struct gmap *gmap = kvm->arch.gmap;
+       DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
 
-       /* Loop over all guest pages */
+       /* Loop over all guest segments */
+       cur_gfn = memslot->base_gfn;
        last_gfn = memslot->base_gfn + memslot->npages;
-       for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
-               address = gfn_to_hva_memslot(memslot, cur_gfn);
+       for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
+               gaddr = gfn_to_gpa(cur_gfn);
+               vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
+               if (kvm_is_error_hva(vmaddr))
+                       continue;
+
+               bitmap_zero(bitmap, _PAGE_ENTRIES);
+               gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
+               for (i = 0; i < _PAGE_ENTRIES; i++) {
+                       if (test_bit(i, bitmap))
+                               mark_page_dirty(kvm, cur_gfn + i);
+               }
 
-               if (test_and_clear_guest_dirty(gmap->mm, address))
-                       mark_page_dirty(kvm, cur_gfn);
                if (fatal_signal_pending(current))
                        return;
                cond_resched();
@@ -667,6 +687,27 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
                         r ? "(not available)" : "(success)");
                break;
+       case KVM_CAP_S390_HPAGE_1M:
+               mutex_lock(&kvm->lock);
+               if (kvm->created_vcpus)
+                       r = -EBUSY;
+               else if (!hpage || kvm->arch.use_cmma)
+                       r = -EINVAL;
+               else {
+                       r = 0;
+                       kvm->mm->context.allow_gmap_hpage_1m = 1;
+                       /*
+                        * We might have to create fake 4k page
+                        * tables. To keep the hardware from working on
+                        * stale PGSTEs, we emulate these instructions.
+                        */
+                       kvm->arch.use_skf = 0;
+                       kvm->arch.use_pfmfi = 0;
+               }
+               mutex_unlock(&kvm->lock);
+               VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
+                        r ? "(not available)" : "(success)");
+               break;
        case KVM_CAP_S390_USER_STSI:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
                kvm->arch.user_stsi = 1;
@@ -714,10 +755,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
                if (!sclp.has_cmma)
                        break;
 
-               ret = -EBUSY;
                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
-               if (!kvm->created_vcpus) {
+               if (kvm->created_vcpus)
+                       ret = -EBUSY;
+               else if (kvm->mm->context.allow_gmap_hpage_1m)
+                       ret = -EINVAL;
+               else {
                        kvm->arch.use_cmma = 1;
                        /* Not compatible with cmma. */
                        kvm->arch.use_pfmfi = 0;
@@ -1540,6 +1584,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
        uint8_t *keys;
        uint64_t hva;
        int srcu_idx, i, r = 0;
+       bool unlocked;
 
        if (args->flags != 0)
                return -EINVAL;
@@ -1564,9 +1609,11 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
        if (r)
                goto out;
 
+       i = 0;
        down_read(&current->mm->mmap_sem);
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       for (i = 0; i < args->count; i++) {
+       while (i < args->count) {
+               unlocked = false;
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
@@ -1580,8 +1627,14 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
                }
 
                r = set_guest_storage_key(current->mm, hva, keys[i], 0);
-               if (r)
-                       break;
+               if (r) {
+                       r = fixup_user_fault(current, current->mm, hva,
+                                            FAULT_FLAG_WRITE, &unlocked);
+                       if (r)
+                               break;
+               }
+               if (!r)
+                       i++;
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        up_read(&current->mm->mmap_sem);
@@ -4082,6 +4135,11 @@ static int __init kvm_s390_init(void)
                return -ENODEV;
        }
 
+       if (nested && hpage) {
+               pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+               return -EINVAL;
+       }
+
        for (i = 0; i < 16; i++)
                kvm_s390_fac_base[i] |=
                        S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
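Several hunks above (set_skeys here, and handle_iske/handle_rrbe/handle_sske/handle_pfmf in priv.c below) apply the same idiom: when a storage-key primitive fails because the page is not writably mapped yet, fault it in with fixup_user_fault() and retry the same address instead of bailing out. A distilled, hedged sketch of the loop (hvas[], keys[] and count are stand-ins for the surrounding kvm context):

    static int set_keys_with_fixup(struct mm_struct *mm, unsigned long *hvas,
                                   unsigned char *keys, unsigned long count)
    {
            unsigned long i = 0;
            bool unlocked;
            int r = 0;

            while (i < count) {
                    unlocked = false;
                    r = set_guest_storage_key(mm, hvas[i], keys[i], 0);
                    if (r) {
                            /* not (writably) mapped yet: fault it in ... */
                            r = fixup_user_fault(current, mm, hvas[i],
                                                 FAULT_FLAG_WRITE, &unlocked);
                            if (r)
                                    break;          /* real failure */
                            continue;               /* ... and retry index i */
                    }
                    i++;
            }
            return r;
    }
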
index eb0eb60c7be6a26677f8ed20509aba88df6da337..cfc5a62329f607d6971bc1a18b23132fddbc6ca0 100644 (file)
@@ -246,9 +246,10 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 
 static int handle_iske(struct kvm_vcpu *vcpu)
 {
-       unsigned long addr;
+       unsigned long gaddr, vmaddr;
        unsigned char key;
        int reg1, reg2;
+       bool unlocked;
        int rc;
 
        vcpu->stat.instruction_iske++;
@@ -262,18 +263,28 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 
        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
-       addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
-       addr = kvm_s390_logical_to_effective(vcpu, addr);
-       addr = kvm_s390_real_to_abs(vcpu, addr);
-       addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
-       if (kvm_is_error_hva(addr))
+       gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+       gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+       gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+       vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+       if (kvm_is_error_hva(vmaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+       unlocked = false;
        down_read(&current->mm->mmap_sem);
-       rc = get_guest_storage_key(current->mm, addr, &key);
-       up_read(&current->mm->mmap_sem);
+       rc = get_guest_storage_key(current->mm, vmaddr, &key);
+
+       if (rc) {
+               rc = fixup_user_fault(current, current->mm, vmaddr,
+                                     FAULT_FLAG_WRITE, &unlocked);
+               if (!rc) {
+                       up_read(&current->mm->mmap_sem);
+                       goto retry;
+               }
+       }
        if (rc)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       up_read(&current->mm->mmap_sem);
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
@@ -281,8 +292,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 
 static int handle_rrbe(struct kvm_vcpu *vcpu)
 {
-       unsigned long addr;
+       unsigned long vmaddr, gaddr;
        int reg1, reg2;
+       bool unlocked;
        int rc;
 
        vcpu->stat.instruction_rrbe++;
@@ -296,19 +308,27 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 
        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
-       addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
-       addr = kvm_s390_logical_to_effective(vcpu, addr);
-       addr = kvm_s390_real_to_abs(vcpu, addr);
-       addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
-       if (kvm_is_error_hva(addr))
+       gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+       gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+       gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+       vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+       if (kvm_is_error_hva(vmaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+       unlocked = false;
        down_read(&current->mm->mmap_sem);
-       rc = reset_guest_reference_bit(current->mm, addr);
-       up_read(&current->mm->mmap_sem);
+       rc = reset_guest_reference_bit(current->mm, vmaddr);
+       if (rc < 0) {
+               rc = fixup_user_fault(current, current->mm, vmaddr,
+                                     FAULT_FLAG_WRITE, &unlocked);
+               if (!rc) {
+                       up_read(&current->mm->mmap_sem);
+                       goto retry;
+               }
+       }
        if (rc < 0)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+       up_read(&current->mm->mmap_sem);
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
 }
@@ -323,6 +343,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
        unsigned long start, end;
        unsigned char key, oldkey;
        int reg1, reg2;
+       bool unlocked;
        int rc;
 
        vcpu->stat.instruction_sske++;
@@ -355,19 +376,28 @@ static int handle_sske(struct kvm_vcpu *vcpu)
        }
 
        while (start != end) {
-               unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+               unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+               unlocked = false;
 
-               if (kvm_is_error_hva(addr))
+               if (kvm_is_error_hva(vmaddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
                down_read(&current->mm->mmap_sem);
-               rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
+               rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
                                                m3 & SSKE_NQ, m3 & SSKE_MR,
                                                m3 & SSKE_MC);
-               up_read(&current->mm->mmap_sem);
-               if (rc < 0)
+
+               if (rc < 0) {
+                       rc = fixup_user_fault(current, current->mm, vmaddr,
+                                             FAULT_FLAG_WRITE, &unlocked);
+                       rc = !rc ? -EAGAIN : rc;
+               }
+               if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-               start += PAGE_SIZE;
+
+               up_read(&current->mm->mmap_sem);
+               if (rc >= 0)
+                       start += PAGE_SIZE;
        }
 
        if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -948,15 +978,16 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
        }
 
        while (start != end) {
-               unsigned long useraddr;
+               unsigned long vmaddr;
+               bool unlocked = false;
 
                /* Translate guest address to host address */
-               useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
-               if (kvm_is_error_hva(useraddr))
+               vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+               if (kvm_is_error_hva(vmaddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
-                       if (clear_user((void __user *)useraddr, PAGE_SIZE))
+                       if (clear_user((void __user *)vmaddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
 
@@ -966,14 +997,20 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                        if (rc)
                                return rc;
                        down_read(&current->mm->mmap_sem);
-                       rc = cond_set_guest_storage_key(current->mm, useraddr,
+                       rc = cond_set_guest_storage_key(current->mm, vmaddr,
                                                        key, NULL, nq, mr, mc);
-                       up_read(&current->mm->mmap_sem);
-                       if (rc < 0)
+                       if (rc < 0) {
+                               rc = fixup_user_fault(current, current->mm, vmaddr,
+                                                     FAULT_FLAG_WRITE, &unlocked);
+                               rc = !rc ? -EAGAIN : rc;
+                       }
+                       if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-               }
 
-               start += PAGE_SIZE;
+                       up_read(&current->mm->mmap_sem);
+                       if (rc >= 0)
+                               start += PAGE_SIZE;
+               }
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
index 2311f15be9cf04b3bad1f766453e6ff970d53a83..40c4d59c926e52d8a7f3e7c3870dbb69e8091c8b 100644 (file)
@@ -17,7 +17,7 @@
 ENTRY(memmove)
        ltgr    %r4,%r4
        lgr     %r1,%r2
-       bzr     %r14
+       jz      .Lmemmove_exit
        aghi    %r4,-1
        clgr    %r2,%r3
        jnh     .Lmemmove_forward
@@ -36,6 +36,7 @@ ENTRY(memmove)
 .Lmemmove_forward_remainder:
        larl    %r5,.Lmemmove_mvc
        ex      %r4,0(%r5)
+.Lmemmove_exit:
        BR_EX   %r14
 .Lmemmove_reverse:
        ic      %r0,0(%r4,%r3)
@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
  */
 ENTRY(memset)
        ltgr    %r4,%r4
-       bzr     %r14
+       jz      .Lmemset_exit
        ltgr    %r3,%r3
        jnz     .Lmemset_fill
        aghi    %r4,-1
@@ -80,6 +81,7 @@ ENTRY(memset)
 .Lmemset_clear_remainder:
        larl    %r3,.Lmemset_xc
        ex      %r4,0(%r3)
+.Lmemset_exit:
        BR_EX   %r14
 .Lmemset_fill:
        cghi    %r4,1
@@ -115,7 +117,7 @@ EXPORT_SYMBOL(memset)
  */
 ENTRY(memcpy)
        ltgr    %r4,%r4
-       bzr     %r14
+       jz      .Lmemcpy_exit
        aghi    %r4,-1
        srlg    %r5,%r4,8
        ltgr    %r5,%r5
@@ -124,6 +126,7 @@ ENTRY(memcpy)
 .Lmemcpy_remainder:
        larl    %r5,.Lmemcpy_mvc
        ex      %r4,0(%r5)
+.Lmemcpy_exit:
        BR_EX   %r14
 .Lmemcpy_loop:
        mvc     0(256,%r1),0(%r3)
@@ -145,9 +148,9 @@ EXPORT_SYMBOL(memcpy)
 .macro __MEMSET bits,bytes,insn
 ENTRY(__memset\bits)
        ltgr    %r4,%r4
-       bzr     %r14
+       jz      .L__memset_exit\bits
        cghi    %r4,\bytes
-       je      .L__memset_exit\bits
+       je      .L__memset_store\bits
        aghi    %r4,-(\bytes+1)
        srlg    %r5,%r4,8
        ltgr    %r5,%r5
@@ -163,8 +166,9 @@ ENTRY(__memset\bits)
        larl    %r5,.L__memset_mvc\bits
        ex      %r4,0(%r5)
        BR_EX   %r14
-.L__memset_exit\bits:
+.L__memset_store\bits:
        \insn   %r3,0(%r2)
+.L__memset_exit\bits:
        BR_EX   %r14
 .L__memset_mvc\bits:
        mvc     \bytes(1,%r1),0(%r1)
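The bzr/jz changes in mem.S follow from the expoline work: `bzr %r14` is a conditional branch via a register, i.e. an indirect branch that would each need its own thunk, whereas a relative `jz` to a shared exit label funnels every return through a single expolined BR_EX. The pattern, sketched in s390 assembly:

    # Pattern only: one relative jump to a shared, thunked return.
            ltgr    %r4,%r4                 # length == 0 ?
            jz      .Lexit                  # relative branch, no thunk needed
            # ... bulk of the routine ...
    .Lexit:
            BR_EX   %r14                    # single expolined return
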
index 6cf024eb2085d86e6a729e3cfa94e80a50f02a38..510a18299196f3b797be51fd22b2bbfbe5cd003f 100644 (file)
@@ -191,12 +191,7 @@ static void cmm_set_timer(void)
                        del_timer(&cmm_timer);
                return;
        }
-       if (timer_pending(&cmm_timer)) {
-               if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
-                       return;
-       }
-       cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
-       add_timer(&cmm_timer);
+       mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
 }
 
 static void cmm_timer_fn(struct timer_list *unused)
@@ -251,45 +246,42 @@ static int cmm_skip_blanks(char *cp, char **endp)
        return str != cp;
 }
 
-static struct ctl_table cmm_table[];
-
 static int cmm_pages_handler(struct ctl_table *ctl, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       char buf[16], *p;
-       unsigned int len;
-       long nr;
+       long nr = cmm_get_pages();
+       struct ctl_table ctl_entry = {
+               .procname       = ctl->procname,
+               .data           = &nr,
+               .maxlen         = sizeof(long),
+       };
+       int rc;
 
-       if (!*lenp || (*ppos && !write)) {
-               *lenp = 0;
-               return 0;
-       }
+       rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write)
+               return rc;
 
-       if (write) {
-               len = *lenp;
-               if (copy_from_user(buf, buffer,
-                                  len > sizeof(buf) ? sizeof(buf) : len))
-                       return -EFAULT;
-               buf[sizeof(buf) - 1] = '\0';
-               cmm_skip_blanks(buf, &p);
-               nr = simple_strtoul(p, &p, 0);
-               if (ctl == &cmm_table[0])
-                       cmm_set_pages(nr);
-               else
-                       cmm_add_timed_pages(nr);
-       } else {
-               if (ctl == &cmm_table[0])
-                       nr = cmm_get_pages();
-               else
-                       nr = cmm_get_timed_pages();
-               len = sprintf(buf, "%ld\n", nr);
-               if (len > *lenp)
-                       len = *lenp;
-               if (copy_to_user(buffer, buf, len))
-                       return -EFAULT;
-       }
-       *lenp = len;
-       *ppos += len;
+       cmm_set_pages(nr);
+       return 0;
+}
+
+static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
+                                  void __user *buffer, size_t *lenp,
+                                  loff_t *ppos)
+{
+       long nr = cmm_get_timed_pages();
+       struct ctl_table ctl_entry = {
+               .procname       = ctl->procname,
+               .data           = &nr,
+               .maxlen         = sizeof(long),
+       };
+       int rc;
+
+       rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+       if (rc < 0 || !write)
+               return rc;
+
+       cmm_add_timed_pages(nr);
        return 0;
 }
 
@@ -338,7 +330,7 @@ static struct ctl_table cmm_table[] = {
        {
                .procname       = "cmm_timed_pages",
                .mode           = 0644,
-               .proc_handler   = cmm_pages_handler,
+               .proc_handler   = cmm_timed_pages_handler,
        },
        {
                .procname       = "cmm_timeout",
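Two simplifications above are worth calling out: mod_timer() already copes with both pending and inactive timers, so the timer_pending()/add_timer() dance collapses to one call; and splitting the handler in two removes the `ctl == &cmm_table[0]` identity test along with the forward declaration it required. A one-liner sketch of the timer half:

    /* mod_timer() re-arms a pending timer and activates an inactive one,
     * so no timer_pending()/add_timer() special-casing is needed. */
    static void rearm(struct timer_list *t, unsigned long secs)
    {
            mod_timer(t, jiffies + secs * HZ);
    }
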
index 6ad15d3fab819d9f17109f7bd08c1d3311ea76ba..84111a43ea2932055fd3e6281eb2289a697da5c2 100644 (file)
@@ -80,7 +80,7 @@ struct qin64 {
 struct dcss_segment {
        struct list_head list;
        char dcss_name[8];
-       char res_name[15];
+       char res_name[16];
        unsigned long start_addr;
        unsigned long end;
        atomic_t ref_count;
@@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
        memcpy(&seg->res_name, seg->dcss_name, 8);
        EBCASC(seg->res_name, 8);
        seg->res_name[8] = '\0';
-       strncat(seg->res_name, " (DCSS)", 7);
+       strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
        seg->res->name = seg->res_name;
        rc = seg->vm_segtype;
        if (rc == SEG_TYPE_SC ||
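The res_name fix is a classic off-by-one: an 8-character segment name plus " (DCSS)" (7 bytes) plus the terminating NUL needs 16 bytes, not 15, and strncat()'s bound limits only the appended bytes, so it cannot protect the destination by itself; strlcat() with sizeof() does. A host-side sketch (with a local stand-in for strlcat, which glibc historically lacked):

    #include <stdio.h>
    #include <string.h>

    /* minimal stand-in for the demo; the kernel has its own strlcat() */
    static size_t demo_strlcat(char *dst, const char *src, size_t size)
    {
            size_t dlen = strlen(dst);

            if (dlen + 1 < size)
                    snprintf(dst + dlen, size - dlen, "%s", src);
            return dlen + strlen(src);
    }

    int main(void)
    {
            char res_name[16] = "12345678";     /* 8-char name, as in the code */

            demo_strlcat(res_name, " (DCSS)", sizeof(res_name));
            printf("\"%s\" uses %zu of %zu bytes\n",
                   res_name, strlen(res_name) + 1, sizeof(res_name));
            return 0;
    }
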
index e074480d3598c031ecd4f01b22f8749578f6d490..4cc3f06b0ab3356c78349f8a2497811eb2af174c 100644 (file)
@@ -502,6 +502,8 @@ retry:
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
+               if (flags & FAULT_FLAG_RETRY_NOWAIT)
+                       goto out_up;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
index bc56ec8abcf7f7ec680568908e05d0de3fd303e4..bb44990c8212080cf24c9409362735fd4f1b569c 100644 (file)
@@ -2,8 +2,10 @@
 /*
  *  KVM guest address space mapping code
  *
- *    Copyright IBM Corp. 2007, 2016
+ *    Copyright IBM Corp. 2007, 2016, 2018
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *              David Hildenbrand <david@redhat.com>
+ *              Janosch Frank <frankja@linux.vnet.ibm.com>
  */
 
 #include <linux/kernel.h>
@@ -521,6 +523,9 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
        rcu_read_unlock();
 }
 
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
+                          unsigned long gaddr);
+
 /**
  * gmap_link - set up shadow page tables to connect a host to a guest address
  * @gmap: pointer to guest mapping meta data structure
@@ -541,6 +546,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
+       u64 unprot;
        int rc;
 
        BUG_ON(gmap_is_shadow(gmap));
@@ -584,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
                return -EFAULT;
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
-       /* large pmds cannot yet be handled */
-       if (pmd_large(*pmd))
+       /* Are we allowed to use huge pages? */
+       if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL);
@@ -596,10 +602,22 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
        if (*table == _SEGMENT_ENTRY_EMPTY) {
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
-               if (!rc)
-                       *table = pmd_val(*pmd);
-       } else
-               rc = 0;
+               if (!rc) {
+                       if (pmd_large(*pmd)) {
+                               *table = (pmd_val(*pmd) &
+                                         _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
+                                       | _SEGMENT_ENTRY_GMAP_UC;
+                       } else
+                               *table = pmd_val(*pmd) &
+                                       _SEGMENT_ENTRY_HARDWARE_BITS;
+               }
+       } else if (*table & _SEGMENT_ENTRY_PROTECT &&
+                  !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
+               unprot = (u64)*table;
+               unprot &= ~_SEGMENT_ENTRY_PROTECT;
+               unprot |= _SEGMENT_ENTRY_GMAP_UC;
+               gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
+       }
        spin_unlock(&gmap->guest_table_lock);
        spin_unlock(ptl);
        radix_tree_preload_end();
@@ -690,6 +708,12 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
+               /*
+                * We do not discard pages that are backed by
+                * hugetlbfs, so we don't have to refault them.
+                */
+               if (vma && is_vm_hugetlb_page(vma))
+                       continue;
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size);
        }
@@ -864,7 +888,128 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
  */
 static void gmap_pte_op_end(spinlock_t *ptl)
 {
-       spin_unlock(ptl);
+       if (ptl)
+               spin_unlock(ptl);
+}
+
+/**
+ * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
+ *                   and return the pmd pointer
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ *
+ * Returns a pointer to the pmd for a guest address, or NULL
+ */
+static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
+{
+       pmd_t *pmdp;
+
+       BUG_ON(gmap_is_shadow(gmap));
+       spin_lock(&gmap->guest_table_lock);
+       pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+
+       if (!pmdp || pmd_none(*pmdp)) {
+               spin_unlock(&gmap->guest_table_lock);
+               return NULL;
+       }
+
+       /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
+       if (!pmd_large(*pmdp))
+               spin_unlock(&gmap->guest_table_lock);
+       return pmdp;
+}
+
+/**
+ * gmap_pmd_op_end - release the guest_table_lock if needed
+ * @gmap: pointer to the guest mapping meta data structure
+ * @pmdp: pointer to the pmd
+ */
+static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
+{
+       if (pmd_large(*pmdp))
+               spin_unlock(&gmap->guest_table_lock);
+}
+
+/*
+ * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd to be protected
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns:
+ * 0 if successfully protected
+ * -EAGAIN if a fixup is needed
+ * -EINVAL if unsupported notifier bits have been specified
+ *
+ * Expected to be called with gmap->mm->mmap_sem held in read and
+ * guest_table_lock held.
+ */
+static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
+                           pmd_t *pmdp, int prot, unsigned long bits)
+{
+       int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
+       int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
+       pmd_t new = *pmdp;
+
+       /* Fixup needed */
+       if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
+               return -EAGAIN;
+
+       if (prot == PROT_NONE && !pmd_i) {
+               pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
+               gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+       }
+
+       if (prot == PROT_READ && !pmd_p) {
+               pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
+               pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
+               gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+       }
+
+       if (bits & GMAP_NOTIFY_MPROT)
+               pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+
+       /* Shadow GMAP protection needs split PMDs */
+       if (bits & GMAP_NOTIFY_SHADOW)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * gmap_protect_pte - remove access rights to memory and set pgste bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd associated with the pte
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns 0 if successfully protected, -ENOMEM if out of memory and
+ * -EAGAIN if a fixup is needed.
+ *
+ * Expected to be called with gmap->mm->mmap_sem held in read
+ */
+static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
+                           pmd_t *pmdp, int prot, unsigned long bits)
+{
+       int rc;
+       pte_t *ptep;
+       spinlock_t *ptl = NULL;
+       unsigned long pbits = 0;
+
+       if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+               return -EAGAIN;
+
+       ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
+       if (!ptep)
+               return -ENOMEM;
+
+       pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
+       pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
+       /* Protect and unlock. */
+       rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
+       gmap_pte_op_end(ptl);
+       return rc;
 }
 
 /*
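
gmap_pmd_op_walk() and gmap_pmd_op_end() bracket all the pmd-level operations that follow: the walk returns with guest_table_lock held only for large (segment) entries, while 4k mappings are left for the caller to lock at the pte level. A minimal sketch of the intended calling pattern, with hypothetical do_* helpers:

        pmd_t *pmdp = gmap_pmd_op_walk(gmap, gaddr);

        if (pmdp) {
                if (pmd_large(*pmdp))
                        do_pmd_op(gmap, gaddr, pmdp);   /* guest_table_lock held */
                else
                        do_pte_op(gmap, gaddr, pmdp);   /* uses pte_alloc_map_lock() */
                gmap_pmd_op_end(gmap, pmdp);    /* drops the lock only for large pmds */
        }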
@@ -883,30 +1028,45 @@ static void gmap_pte_op_end(spinlock_t *ptl)
 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                              unsigned long len, int prot, unsigned long bits)
 {
-       unsigned long vmaddr;
-       spinlock_t *ptl;
-       pte_t *ptep;
+       unsigned long vmaddr, dist;
+       pmd_t *pmdp;
        int rc;
 
        BUG_ON(gmap_is_shadow(gmap));
        while (len) {
                rc = -EAGAIN;
-               ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
-               if (ptep) {
-                       rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
-                       gmap_pte_op_end(ptl);
+               pmdp = gmap_pmd_op_walk(gmap, gaddr);
+               if (pmdp) {
+                       if (!pmd_large(*pmdp)) {
+                               rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
+                                                     bits);
+                               if (!rc) {
+                                       len -= PAGE_SIZE;
+                                       gaddr += PAGE_SIZE;
+                               }
+                       } else {
+                               rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
+                                                     bits);
+                               if (!rc) {
+                                       dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
+                                       len = len < dist ? 0 : len - dist;
+                                       gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
+                               }
+                       }
+                       gmap_pmd_op_end(gmap, pmdp);
                }
                if (rc) {
+                       if (rc == -EINVAL)
+                               return rc;
+
+                       /* -EAGAIN, fixup of userspace mm and gmap */
                        vmaddr = __gmap_translate(gmap, gaddr);
                        if (IS_ERR_VALUE(vmaddr))
                                return vmaddr;
                        rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
                        if (rc)
                                return rc;
-                       continue;
                }
-               gaddr += PAGE_SIZE;
-               len -= PAGE_SIZE;
        }
        return 0;
 }
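
For huge pmds the loop now advances by whole segments rather than by PAGE_SIZE: dist is the remaining distance from gaddr to the end of the current 1 MB segment. A worked example of the arithmetic, with values chosen for illustration:

        /* HPAGE_SIZE = 0x100000 (1 MB), gaddr = 0x203000, len = 0x300000:
         *   dist  = 0x100000 - (0x203000 & 0xfffff)  = 0xfd000
         *   len   = 0x300000 - 0xfd000               = 0x203000
         *   gaddr = (0x203000 & ~0xfffff) + 0x100000 = 0x300000
         * so protecting one segment entry consumes the partial range up to
         * the next 1 MB boundary, and the walk continues from there. */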
@@ -935,7 +1095,7 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
        if (!MACHINE_HAS_ESOP && prot == PROT_READ)
                return -EINVAL;
        down_read(&gmap->mm->mmap_sem);
-       rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
+       rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
        up_read(&gmap->mm->mmap_sem);
        return rc;
 }
@@ -1474,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
        unsigned long limit;
        int rc;
 
+       BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
        BUG_ON(gmap_is_shadow(parent));
        spin_lock(&parent->shadow_lock);
        sg = gmap_find_shadow(parent, asce, edat_level);
@@ -1526,7 +1687,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
        down_read(&parent->mm->mmap_sem);
        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
                                ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
-                               PROT_READ, PGSTE_VSIE_BIT);
+                               PROT_READ, GMAP_NOTIFY_SHADOW);
        up_read(&parent->mm->mmap_sem);
        spin_lock(&parent->shadow_lock);
        new->initialized = true;
@@ -2092,6 +2253,225 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 }
 EXPORT_SYMBOL_GPL(ptep_notify);
 
+static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
+                            unsigned long gaddr)
+{
+       pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+       gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+}
+
+/**
+ * gmap_pmdp_xchg - exchange a gmap pmd with another
+ * @gmap: pointer to the guest address space structure
+ * @pmdp: pointer to the pmd entry
+ * @new: replacement entry
+ * @gaddr: the affected guest address
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
+                          unsigned long gaddr)
+{
+       gaddr &= HPAGE_MASK;
+       pmdp_notify_gmap(gmap, pmdp, gaddr);
+       pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
+       if (MACHINE_HAS_TLB_GUEST)
+               __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
+                           IDTE_GLOBAL);
+       else if (MACHINE_HAS_IDTE)
+               __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
+       else
+               __pmdp_csp(pmdp);
+       *pmdp = new;
+}
+
+static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
+                           int purge)
+{
+       pmd_t *pmdp;
+       struct gmap *gmap;
+       unsigned long gaddr;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+               spin_lock(&gmap->guest_table_lock);
+               pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+                                                 vmaddr >> PMD_SHIFT);
+               if (pmdp) {
+                       gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
+                       pmdp_notify_gmap(gmap, pmdp, gaddr);
+                       WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+                                                  _SEGMENT_ENTRY_GMAP_UC));
+                       if (purge)
+                               __pmdp_csp(pmdp);
+                       pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+               }
+               spin_unlock(&gmap->guest_table_lock);
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
+ *                        flushing
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
+{
+       gmap_pmdp_clear(mm, vmaddr, 0);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
+
+/**
+ * gmap_pmdp_csp - csp all affected guest pmd entries
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
+{
+       gmap_pmdp_clear(mm, vmaddr, 1);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
+
+/**
+ * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
+{
+       unsigned long *entry, gaddr;
+       struct gmap *gmap;
+       pmd_t *pmdp;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+               spin_lock(&gmap->guest_table_lock);
+               entry = radix_tree_delete(&gmap->host_to_guest,
+                                         vmaddr >> PMD_SHIFT);
+               if (entry) {
+                       pmdp = (pmd_t *)entry;
+                       gaddr = __gmap_segment_gaddr(entry);
+                       pmdp_notify_gmap(gmap, pmdp, gaddr);
+                       WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+                                          _SEGMENT_ENTRY_GMAP_UC));
+                       if (MACHINE_HAS_TLB_GUEST)
+                               __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+                                           gmap->asce, IDTE_LOCAL);
+                       else if (MACHINE_HAS_IDTE)
+                               __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
+                       *entry = _SEGMENT_ENTRY_EMPTY;
+               }
+               spin_unlock(&gmap->guest_table_lock);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
+
+/**
+ * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
+{
+       unsigned long *entry, gaddr;
+       struct gmap *gmap;
+       pmd_t *pmdp;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+               spin_lock(&gmap->guest_table_lock);
+               entry = radix_tree_delete(&gmap->host_to_guest,
+                                         vmaddr >> PMD_SHIFT);
+               if (entry) {
+                       pmdp = (pmd_t *)entry;
+                       gaddr = __gmap_segment_gaddr(entry);
+                       pmdp_notify_gmap(gmap, pmdp, gaddr);
+                       WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+                                          _SEGMENT_ENTRY_GMAP_UC));
+                       if (MACHINE_HAS_TLB_GUEST)
+                               __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+                                           gmap->asce, IDTE_GLOBAL);
+                       else if (MACHINE_HAS_IDTE)
+                               __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
+                       else
+                               __pmdp_csp(pmdp);
+                       *entry = _SEGMENT_ENTRY_EMPTY;
+               }
+               spin_unlock(&gmap->guest_table_lock);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
+
+/**
+ * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
+ * @gmap: pointer to guest address space
+ * @pmdp: pointer to the pmd to be tested
+ * @gaddr: virtual address in the guest address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+                                  unsigned long gaddr)
+{
+       if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+               return false;
+
+       /* Already protected memory that did not change is clean */
+       if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
+           !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
+               return false;
+
+       /* Clear UC indication and reset protection */
+       pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+       gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
+       return true;
+}
+
+/**
+ * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
+ * @gmap: pointer to guest address space
+ * @bitmap: dirty bitmap for this pmd
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: virtual address in the host address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+                            unsigned long gaddr, unsigned long vmaddr)
+{
+       int i;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       spinlock_t *ptl;
+
+       pmdp = gmap_pmd_op_walk(gmap, gaddr);
+       if (!pmdp)
+               return;
+
+       if (pmd_large(*pmdp)) {
+               if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
+                       bitmap_fill(bitmap, _PAGE_ENTRIES);
+       } else {
+               for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
+                       ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
+                       if (!ptep)
+                               continue;
+                       if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
+                               set_bit(i, bitmap);
+                       spin_unlock(ptl);
+               }
+       }
+       gmap_pmd_op_end(gmap, pmdp);
+}
+EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
+
 static inline void thp_split_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2168,17 +2548,45 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
  * Enable storage key handling from now on and initialize the storage
  * keys with the default key.
  */
-static int __s390_enable_skey(pte_t *pte, unsigned long addr,
-                             unsigned long next, struct mm_walk *walk)
+static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
+                                 unsigned long next, struct mm_walk *walk)
 {
        /* Clear storage key */
        ptep_zap_key(walk->mm, addr, pte);
        return 0;
 }
 
+static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+                                     unsigned long hmask, unsigned long next,
+                                     struct mm_walk *walk)
+{
+       pmd_t *pmd = (pmd_t *)pte;
+       unsigned long start, end;
+       struct page *page = pmd_page(*pmd);
+
+       /*
+        * The write check makes sure we do not set a key on shared
+        * memory. This is needed as the walker does not differentiate
+        * between actual guest memory and the process executable or
+        * shared libraries.
+        */
+       if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
+           !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
+               return 0;
+
+       start = pmd_val(*pmd) & HPAGE_MASK;
+       end = start + HPAGE_SIZE - 1;
+       __storage_key_init_range(start, end);
+       set_bit(PG_arch_1, &page->flags);
+       return 0;
+}
+
 int s390_enable_skey(void)
 {
-       struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+       struct mm_walk walk = {
+               .hugetlb_entry = __s390_enable_skey_hugetlb,
+               .pte_entry = __s390_enable_skey_pte,
+       };
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc = 0;
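
With the callbacks split, the remainder of s390_enable_skey() (outside this hunk) walks the whole address space so hugetlb vmas hit the new hugetlb_entry while everything else is visited pte by pte. A hedged sketch of how such a walk is driven, assuming the 4.18-era walk_page_range() signature:

        struct mm_walk walk = {
                .hugetlb_entry  = __s390_enable_skey_hugetlb,
                .pte_entry      = __s390_enable_skey_pte,
                .mm             = mm,
        };

        rc = walk_page_range(0, TASK_SIZE, &walk);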
index e804090f4470fce93446cdde89cd5170d6c25644..b0246c705a192aac8462ee99f7a7e32081a9c6ce 100644 (file)
@@ -123,6 +123,29 @@ static inline pte_t __rste_to_pte(unsigned long rste)
        return pte;
 }
 
+static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+{
+       struct page *page;
+       unsigned long size, paddr;
+
+       if (!mm_uses_skeys(mm) ||
+           rste & _SEGMENT_ENTRY_INVALID)
+               return;
+
+       if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+               page = pud_page(__pud(rste));
+               size = PUD_SIZE;
+               paddr = rste & PUD_MASK;
+       } else {
+               page = pmd_page(__pmd(rste));
+               size = PMD_SIZE;
+               paddr = rste & PMD_MASK;
+       }
+
+       if (!test_and_set_bit(PG_arch_1, &page->flags))
+               __storage_key_init_range(paddr, paddr + size - 1);
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
 {
@@ -137,6 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
        else
                rste |= _SEGMENT_ENTRY_LARGE;
+       clear_huge_pte_skeys(mm, rste);
        pte_val(*ptep) = rste;
 }
 
index 382153ff17e30e5203b217654d6b25fa0ddd78e6..dc3cede7f2ec9df1a886fc3e92821f2b7f9c6195 100644 (file)
@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
                        list_for_each(l, &zone->free_area[order].free_list[t]) {
                                page = list_entry(l, struct page, lru);
                                if (make_stable)
-                                       set_page_stable_dat(page, 0);
+                                       set_page_stable_dat(page, order);
                                else
                                        set_page_unused(page, order);
                        }
index c44171588d081444d724719208f366b24492cda0..f8c6faab41f4b3543ee0106ed39b6b0111af3d6e 100644 (file)
@@ -14,7 +14,7 @@
 
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
 {
-       asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+       asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
                     : [addr] "+a" (addr) : [skey] "d" (skey));
        return addr;
 }
@@ -23,8 +23,6 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
 {
        unsigned long boundary, size;
 
-       if (!PAGE_DEFAULT_KEY)
-               return;
        while (start < end) {
                if (MACHINE_HAS_EDAT1) {
                        /* set storage keys for a 1MB frame */
@@ -37,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
                                continue;
                        }
                }
-               page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+               page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
                start += PAGE_SIZE;
        }
 }
index 84bd6329a88dd3ace39e612197dccec0a48dc4fc..76d89ee8b428837fc6c32f962d0104787caa29a3 100644 (file)
@@ -28,7 +28,7 @@ static struct ctl_table page_table_sysctl[] = {
                .data           = &page_table_allocate_pgste,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO | S_IWUSR,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &page_table_allocate_pgste_min,
                .extra2         = &page_table_allocate_pgste_max,
        },
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
                spin_unlock_bh(&mm->context.lock);
                if (mask != 0)
                        return;
+       } else {
+               atomic_xor_bits(&page->_refcount, 3U << 24);
        }
 
        pgtable_page_dtor(page);
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
+               if (mask & 3)
+                       atomic_xor_bits(&page->_refcount, 3 << 24);
                pgtable_page_dtor(page);
                __free_page(page);
                break;
index 301e466e4263d8c87abd9069952c10dac371f640..f2cc7da473e4ed2afb858c868a6e3238d9c1dce5 100644 (file)
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
                            mm->context.asce, IDTE_LOCAL);
        else
                __pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+       if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+               gmap_pmdp_idte_local(mm, addr);
 }
 
 static inline void pmdp_idte_global(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
 {
-       if (MACHINE_HAS_TLB_GUEST)
+       if (MACHINE_HAS_TLB_GUEST) {
                __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
                            mm->context.asce, IDTE_GLOBAL);
-       else if (MACHINE_HAS_IDTE)
+               if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+                       gmap_pmdp_idte_global(mm, addr);
+       } else if (MACHINE_HAS_IDTE) {
                __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
-       else
+               if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+                       gmap_pmdp_idte_global(mm, addr);
+       } else {
                __pmdp_csp(pmdp);
+               if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+                       gmap_pmdp_csp(mm, addr);
+       }
 }
 
 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                          cpumask_of(smp_processor_id()))) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
+               if (mm_has_pgste(mm))
+                       gmap_pmdp_invalidate(mm, addr);
        } else {
                pmdp_idte_global(mm, addr, pmdp);
        }
@@ -399,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
        return old;
 }
 
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset(mm, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
+       pud = pud_alloc(mm, p4d, addr);
+       if (!pud)
+               return NULL;
+       pmd = pmd_alloc(mm, pud, addr);
+       return pmd;
+}
+
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t new)
 {
@@ -693,40 +722,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 /*
  * Test and reset if a guest page is dirty
  */
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
+                      pte_t *ptep)
 {
-       spinlock_t *ptl;
-       pgd_t *pgd;
-       p4d_t *p4d;
-       pud_t *pud;
-       pmd_t *pmd;
        pgste_t pgste;
-       pte_t *ptep;
        pte_t pte;
        bool dirty;
        int nodat;
 
-       pgd = pgd_offset(mm, addr);
-       p4d = p4d_alloc(mm, pgd, addr);
-       if (!p4d)
-               return false;
-       pud = pud_alloc(mm, p4d, addr);
-       if (!pud)
-               return false;
-       pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd)
-               return false;
-       /* We can't run guests backed by huge pages, but userspace can
-        * still set them up and then try to migrate them without any
-        * migration support.
-        */
-       if (pmd_large(*pmd))
-               return true;
-
-       ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-       if (unlikely(!ptep))
-               return false;
-
        pgste = pgste_get_lock(ptep);
        dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
        pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -742,21 +745,43 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
                *ptep = pte;
        }
        pgste_set_unlock(ptep, pgste);
-
-       spin_unlock(ptl);
        return dirty;
 }
-EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
 
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq)
 {
-       unsigned long keyul;
+       unsigned long keyul, paddr;
        spinlock_t *ptl;
        pgste_t old, new;
+       pmd_t *pmdp;
        pte_t *ptep;
 
-       ptep = get_locked_pte(mm, addr, &ptl);
+       pmdp = pmd_alloc_map(mm, addr);
+       if (unlikely(!pmdp))
+               return -EFAULT;
+
+       ptl = pmd_lock(mm, pmdp);
+       if (!pmd_present(*pmdp)) {
+               spin_unlock(ptl);
+               return -EFAULT;
+       }
+
+       if (pmd_large(*pmdp)) {
+               paddr = pmd_val(*pmdp) & HPAGE_MASK;
+               paddr |= addr & ~HPAGE_MASK;
+               /*
+                * Huge pmds need quiescing operations; they are
+                * always mapped.
+                */
+               page_set_storage_key(paddr, key, 1);
+               spin_unlock(ptl);
+               return 0;
+       }
+       spin_unlock(ptl);
+
+       ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
 
@@ -767,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
        pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-               unsigned long address, bits, skey;
+               unsigned long bits, skey;
 
-               address = pte_val(*ptep) & PAGE_MASK;
-               skey = (unsigned long) page_get_storage_key(address);
+               paddr = pte_val(*ptep) & PAGE_MASK;
+               skey = (unsigned long) page_get_storage_key(paddr);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
-               page_set_storage_key(address, skey, !nq);
+               page_set_storage_key(paddr, skey, !nq);
                /* Merge host changed & referenced into pgste  */
                pgste_val(new) |= bits << 52;
        }
@@ -830,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 {
        spinlock_t *ptl;
+       unsigned long paddr;
        pgste_t old, new;
+       pmd_t *pmdp;
        pte_t *ptep;
        int cc = 0;
 
-       ptep = get_locked_pte(mm, addr, &ptl);
+       pmdp = pmd_alloc_map(mm, addr);
+       if (unlikely(!pmdp))
+               return -EFAULT;
+
+       ptl = pmd_lock(mm, pmdp);
+       if (!pmd_present(*pmdp)) {
+               spin_unlock(ptl);
+               return -EFAULT;
+       }
+
+       if (pmd_large(*pmdp)) {
+               paddr = pmd_val(*pmdp) & HPAGE_MASK;
+               paddr |= addr & ~HPAGE_MASK;
+               cc = page_reset_referenced(paddr);
+               spin_unlock(ptl);
+               return cc;
+       }
+       spin_unlock(ptl);
+
+       ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
 
@@ -843,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
        pgste_val(new) &= ~PGSTE_GR_BIT;
 
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-               cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+               paddr = pte_val(*ptep) & PAGE_MASK;
+               cc = page_reset_referenced(paddr);
                /* Merge real referenced bit into host-set */
                pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
        }
@@ -862,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char *key)
 {
+       unsigned long paddr;
        spinlock_t *ptl;
        pgste_t pgste;
+       pmd_t *pmdp;
        pte_t *ptep;
 
-       ptep = get_locked_pte(mm, addr, &ptl);
+       pmdp = pmd_alloc_map(mm, addr);
+       if (unlikely(!pmdp))
+               return -EFAULT;
+
+       ptl = pmd_lock(mm, pmdp);
+       if (!pmd_present(*pmdp)) {
+               /* Memory that is not yet mapped has a zero key */
+               spin_unlock(ptl);
+               *key = 0;
+               return 0;
+       }
+
+       if (pmd_large(*pmdp)) {
+               paddr = pmd_val(*pmdp) & HPAGE_MASK;
+               paddr |= addr & ~HPAGE_MASK;
+               *key = page_get_storage_key(paddr);
+               spin_unlock(ptl);
+               return 0;
+       }
+       spin_unlock(ptl);
+
+       ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
 
        pgste = pgste_get_lock(ptep);
        *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+       paddr = pte_val(*ptep) & PAGE_MASK;
        if (!(pte_val(*ptep) & _PAGE_INVALID))
-               *key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+               *key = page_get_storage_key(paddr);
        /* Reflect guest's logical view, not physical */
        *key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        pgste_set_unlock(ptep, pgste);
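
All three key helpers now share one shape: allocate down to the pmd, take pmd_lock(), serve large pmds directly against the physical address, and fall back to the pgste-based pte path only for 4k mappings. A hedged sketch of a caller, loosely modeled on how KVM reads guest storage keys (the guest_to_host() translation helper is hypothetical):

        unsigned char key;
        int rc;

        down_read(&mm->mmap_sem);
        rc = get_guest_storage_key(mm, guest_to_host(gaddr), &key);
        up_read(&mm->mmap_sem);
        if (!rc)
                pr_debug("guest page key: %02x\n", key);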
index d2db8acb1a55480895e38fdf142c3d074610230d..d7052cbe984f81c02d203a6b34e7af6d658bd4f9 100644 (file)
@@ -485,8 +485,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
                        /* br %r1 */
                        _EMIT2(0x07f1);
                } else {
-                       /* larl %r1,.+14 */
-                       EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
                        /* ex 0,S390_lowcore.br_r1_trampoline */
                        EMIT4_DISP(0x44000000, REG_0, REG_0,
                                   offsetof(struct lowcore, br_r1_trampoline));
@@ -1286,6 +1284,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                goto free_addrs;
        }
        if (bpf_jit_prog(&jit, fp)) {
+               bpf_jit_binary_free(header);
                fp = orig_fp;
                goto free_addrs;
        }
index 06a80434cfe63f69c7c7ff2e86caba734161484e..5bd374491f9461467790e1e0522f838827f094df 100644 (file)
@@ -134,26 +134,14 @@ void __init numa_setup(void)
 {
        pr_info("NUMA mode: %s\n", mode->name);
        nodes_clear(node_possible_map);
+       /* Initially attach all possible CPUs to node 0. */
+       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        if (mode->setup)
                mode->setup();
        numa_setup_memory();
        memblock_dump_all();
 }
 
-/*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
-       /* Attach all possible CPUs to node 0 for now. */
-       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
-       return 0;
-}
-early_initcall(numa_init_early);
-
 /*
  * numa_init_late() - Initialization initcall
  *
index b482e95b6249e380dfb39d89253789c61dedb1e3..57f7cdac70a3114891c044f8cda2ae6fb6d48dc7 100644 (file)
@@ -48,6 +48,10 @@ static char *pci_fmt2_names[] = {
        "Maximum work units",
 };
 
+static char *pci_fmt3_names[] = {
+       "Transmitted bytes",
+};
+
 static char *pci_sw_names[] = {
        "Allocated pages",
        "Mapped pages",
@@ -112,6 +116,10 @@ static int pci_perf_show(struct seq_file *m, void *v)
                pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
                             &zdev->fmb->fmt2.consumed_work_units);
                break;
+       case 3:
+               pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
+                            &zdev->fmb->fmt3.tx_bytes);
+               break;
        default:
                seq_puts(m, "Unknown format\n");
        }
index 1ace023cbdcec3a6462d753b0f39e678b23b1828..8d61218a71aa876fa01209df33f19f3c89ecc955 100644 (file)
@@ -7,13 +7,13 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
 targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
        $(call if_changed_rule,cc_o_c)
 
-$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
+$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
        $(call if_changed_rule,as_o_S)
 
-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
+$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
        $(call if_changed_rule,cc_o_c)
 
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
@@ -21,8 +21,9 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
index 660c96a05a9b64e2d409335e1a3af0afcc11026b..2e3707b12eddbb92f02a27632ced337c7a889200 100644 (file)
@@ -243,33 +243,26 @@ gprregs:
        .quad   0
        .endr
 
-purgatory_sha256_digest:
-       .global purgatory_sha256_digest
-       .rept   32      /* SHA256_DIGEST_SIZE */
-       .byte   0
-       .endr
-
-purgatory_sha_regions:
-       .global purgatory_sha_regions
-       .rept   16 * __KEXEC_SHA_REGION_SIZE    /* KEXEC_SEGMENTS_MAX */
-       .byte   0
-       .endr
-
-kernel_entry:
-       .global kernel_entry
-       .quad   0
-
-kernel_type:
-       .global kernel_type
-       .quad   0
-
-crash_start:
-       .global crash_start
-       .quad   0
+/* Macro to define a global variable with name and size (in bytes) to be
+ * shared with C code.
+ *
+ * Add the .size and .type attribute to satisfy checks on the Elf_Sym during
+ * purgatory load.
+ */
+.macro GLOBAL_VARIABLE name,size
+\name:
+       .global \name
+       .size   \name,\size
+       .type   \name,object
+       .skip   \size,0
+.endm
 
-crash_size:
-       .global crash_size
-       .quad   0
+GLOBAL_VARIABLE purgatory_sha256_digest,32
+GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
+GLOBAL_VARIABLE kernel_entry,8
+GLOBAL_VARIABLE kernel_type,8
+GLOBAL_VARIABLE crash_start,8
+GLOBAL_VARIABLE crash_size,8
 
        .align  PAGE_SIZE
 stack:
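
The macro collapses six hand-written blocks into one definition that also emits .size and .type, which the kexec loader's Elf_Sym checks insist on. The C side keeps only declarations for the same symbols; roughly, as a hedged sketch of what asm/purgatory.h provides:

        extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
        extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
        extern u64 kernel_entry;
        extern u64 kernel_type;
        extern u64 crash_start;
        extern u64 crash_size;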
index 4e2beb3c29b797e2156cf72172d5e9bd94e257b6..3528e6da4e8792f020b94786a99da13049d5ba46 100644 (file)
 #include <linux/string.h>
 #include <asm/purgatory.h>
 
-struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
-u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
-
-u64 kernel_entry;
-u64 kernel_type;
-
-u64 crash_start;
-u64 crash_size;
-
 int verify_sha256_digest(void)
 {
        struct kexec_sha_region *ptr, *end;
index d92f2d94a5d94f0e7cfd0a237511bbbb59fff847..9bba2c14e0cafbcad575cd3f44f2521b9bed79af 100644 (file)
@@ -2,13 +2,22 @@
 
 quiet_cmd_chkbss = CHKBSS  $<
 define cmd_chkbss
+       rm -f $@; \
        if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
                echo "error: $< .bss section is not empty" >&2; exit 1; \
        fi; \
        touch $@;
 endef
 
-$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+chkbss-target ?= $(obj)/built-in.a
+ifneq (,$(findstring /,$(chkbss)))
+chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
+else
+chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+endif
+
+$(chkbss-target): $(chkbss-files)
+targets += $(notdir $(chkbss-files))
 
 %.o.chkbss: %.o
        $(call cmd,chkbss)
index 259aa0680d1a748f1f78ffdcbc779042678bd0cb..a1bc02b29c81385ffacdd77d43f01c8ea5c8d675 100644 (file)
@@ -257,7 +257,7 @@ static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
        if (!desc->group)
                exit(EXIT_FAILURE);
        group = &desc->group[desc->nr_groups - 1];
-       strncpy(group->opcode, insn->opcode, 2);
+       memcpy(group->opcode, insn->opcode, 2);
        group->type = insn->type;
        group->offset = offset;
        group->count = 1;
@@ -283,7 +283,7 @@ static void print_opcode_table(struct gen_opcode *desc)
                        continue;
                add_to_group(desc, insn, offset);
                if (strncmp(opcode, insn->opcode, 2)) {
-                       strncpy(opcode, insn->opcode, 2);
+                       memcpy(opcode, insn->opcode, 2);
                        printf("\t/* %.2s */ \\\n", opcode);
                }
                print_opcode(insn, offset);
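
group->opcode is a fixed-width two-byte field that is only ever compared with strncmp(), never printed as a C string; strncpy() with a length equal to the source length cannot NUL-terminate and trips gcc 8's -Wstringop-truncation, so memcpy() states the intent. A small sketch of the idiom (the handler is hypothetical):

        char opcode[2];                         /* fixed-width field, not a C string */

        memcpy(opcode, insn->opcode, sizeof(opcode));   /* exactly two bytes */
        if (!strncmp(opcode, "b2", sizeof(opcode)))     /* compares stay bounded */
                handle_b2_group();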
index 0fd0099f43cca4fe830ef0664554280fe7d24055..f37b95a80232d65d3467b49acb89a80290703752 100644 (file)
 #include <asm/atomic-irq.h>
 #endif
 
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
-#define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v)                  atomic_add(1, (v))
-#define atomic_dec(v)                  atomic_sub(1, (v))
-
 #define atomic_xchg(v, new)            (xchg(&((v)->counter), new))
 #define atomic_cmpxchg(v, o, n)                (cmpxchg(&((v)->counter), (o), (n)))
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-
-       return c;
-}
-
 #endif /* CONFIG_CPU_J2 */
 
 #endif /* __ASM_SH_ATOMIC_H */
index 1e881f5db6597a06bb0db787f4c105261f36ae98..593a9704782bdc7108696011ab0d65850d5393d3 100644 (file)
@@ -8,7 +8,8 @@
  * This work is licensed under the terms of the GNU GPL, version 2.  See the
  * file "COPYING" in the main directory of this archive for more details.
  */
-#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
 #include <asm/byteorder.h>
 
 /*
index 7431c172c0cb63358fe2955249203cb91642e73a..199d17b765f2b9424d9c3eea0225260ed39b175a 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/types.h>
 
 struct arch_hw_breakpoint {
-       char            *name; /* Contains name of the symbol to set bkpt */
        unsigned long   address;
        u16             len;
        u16             type;
@@ -41,6 +40,7 @@ struct sh_ubc {
        struct clk      *clk;   /* optional interface clock / MSTP bit */
 };
 
+struct perf_event_attr;
 struct perf_event;
 struct task_struct;
 struct pmu;
@@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type)
 }
 
 /* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index 85d8bcaa8493a97c652250320b0a193f31b4cd80..6171682f77989979bf2cb56c634afd44c4784f74 100644 (file)
@@ -27,7 +27,6 @@ struct kprobe;
 
 void arch_remove_kprobe(struct kprobe *);
 void kretprobe_trampoline(void);
-void jprobe_return_end(void);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -43,9 +42,6 @@ struct prev_kprobe {
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
-       unsigned long jprobe_saved_r15;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index 8648ed05ccf00e4974237ff1fd77e5a1270dd7ca..d9ff3b42da7cb11a3e6d62ec39dcfc2c35ce40d2 100644 (file)
@@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len)
 /*
  * Check for virtual address in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->len);
+       va = hw->address;
+       len = get_hbp_len(hw->len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        return 0;
 }
 
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->len = SH_BREAKPOINT_LEN_1;
+               hw->len = SH_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->len = SH_BREAKPOINT_LEN_2;
+               hw->len = SH_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->len = SH_BREAKPOINT_LEN_4;
+               hw->len = SH_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->len = SH_BREAKPOINT_LEN_8;
+               hw->len = SH_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }
 
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_R:
-               info->type = SH_BREAKPOINT_READ;
+               hw->type = SH_BREAKPOINT_READ;
                break;
        case HW_BREAKPOINT_W:
-               info->type = SH_BREAKPOINT_WRITE;
+               hw->type = SH_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
-               info->type = SH_BREAKPOINT_RW;
+               hw->type = SH_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
@@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;
 
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
        ret = -EINVAL;
 
-       switch (info->len) {
+       switch (hw->len) {
        case SH_BREAKPOINT_LEN_1:
                align = 0;
                break;
@@ -248,18 +248,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                return ret;
        }
 
-       /*
-        * For kernel-addresses, either the address or symbol name can be
-        * specified.
-        */
-       if (info->name)
-               info->address = (unsigned long)kallsyms_lookup_name(info->name);
-
        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
-       if (info->address & align)
+       if (hw->address & align)
                return -EINVAL;
 
        return 0;
@@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
                perf_bp_event(bp, args->regs);
 
                /* Deliver the signal to userspace */
-               if (!arch_check_bp_in_kernelspace(bp)) {
+               if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
                        force_sig_fault(SIGTRAP, TRAP_HWBKPT,
                                        (void __user *)NULL, current);
                }
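
The conversion threads the perf_event_attr and arch_hw_breakpoint through hw_breakpoint_arch_parse() explicitly instead of digging both out of the perf_event, so the core can validate modified attributes before committing them. For context, a hedged sketch of how a kernel user registers such a breakpoint through the generic API (modeled on samples/hw_breakpoint):

        #include <linux/hw_breakpoint.h>
        #include <linux/perf_event.h>

        static void wp_handler(struct perf_event *bp,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
        {
                pr_info("write watchpoint hit\n");
        }

        static struct perf_event * __percpu *wp;

        static int install_wp(void *kaddr)
        {
                struct perf_event_attr attr;

                hw_breakpoint_init(&attr);
                attr.bp_addr = (unsigned long)kaddr;
                attr.bp_len  = HW_BREAKPOINT_LEN_4;
                attr.bp_type = HW_BREAKPOINT_W;

                wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
                return IS_ERR((void __force *)wp) ?
                        PTR_ERR((void __force *)wp) : 0;
        }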
index 52a5e11247d192b30c777a26cd3db6f75b50cf40..241e903dd3ee224a7f05b2318854389afeb13a94 100644 (file)
@@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
-               } else {
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               goto ss_probe;
-                       }
                }
                goto no_kprobe;
        }
@@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;
@@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        regs->pc = orig_ret_address;
        kretprobe_hash_unlock(current, &flags);
 
-       preempt_enable_no_resched();
-
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
@@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                if (post_kprobe_handler(args->regs))
                                        ret = NOTIFY_STOP;
                        } else {
-                               if (kprobe_handler(args->regs)) {
+                               if (kprobe_handler(args->regs))
                                        ret = NOTIFY_STOP;
-                               } else {
-                                       p = __this_cpu_read(current_kprobe);
-                                       if (p->break_handler &&
-                                           p->break_handler(p, args->regs))
-                                               ret = NOTIFY_STOP;
-                               }
                        }
                }
        }
@@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_r15 = regs->regs[15];
-       addr = kcb->jprobe_saved_r15;
-
-       /*
-        * TBD: As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
-        */
-       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
-              MIN_STACK_SIZE(addr));
-
-       regs->pc = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack_addr = kcb->jprobe_saved_r15;
-       u8 *addr = (u8 *)regs->pc;
-
-       if ((addr >= (u8 *)jprobe_return) &&
-           (addr <= (u8 *)jprobe_return_end)) {
-               *regs = kcb->jprobe_saved_regs;
-
-               memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
-                      MIN_STACK_SIZE(stack_addr));
-
-               kcb->kprobe_status = KPROBE_HIT_SS;
-               preempt_enable_no_resched();
-               return 1;
-       }
-
-       return 0;
-}
-
 static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
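
With jprobes gone, the break_handler paths and the jprobe register/stack save area disappear from every architecture; a plain kprobe with a pre_handler remains the supported way to hook an instruction. A minimal hedged sketch (the probed symbol is arbitrary):

        #include <linux/kprobes.h>

        static int my_pre(struct kprobe *p, struct pt_regs *regs)
        {
                pr_info("hit %s\n", p->symbol_name);
                return 0;       /* let the core single-step the original insn */
        }

        static struct kprobe kp = {
                .symbol_name    = "do_sys_open",
                .pre_handler    = my_pre,
        };

        /* register_kprobe(&kp) to arm, unregister_kprobe(&kp) to disarm */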
index ac67828da20100c1459bf2c13e2cef9a09414db1..410b263ef5c849030ce7a6379040361509d08225 100644 (file)
@@ -13,6 +13,7 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += module.h
+generic-y += msi.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += serial.h
index d13ce517f4b9946382579a7a4d5e9bbccdb6332e..94c930f0bc62a7b3aa9736c310bb2a7024abecc2 100644 (file)
@@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *);
 int atomic_fetch_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
+#define atomic_fetch_add_unless        atomic_fetch_add_unless
+
 #define atomic_set_release(v, i)       atomic_set((v), (i))
 
 #define atomic_read(v)          READ_ONCE((v)->counter)
 
 #define atomic_add(i, v)       ((void)atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v)       ((void)atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v)          ((void)atomic_add_return(        1, (v)))
-#define atomic_dec(v)          ((void)atomic_add_return(       -1, (v)))
 
 #define atomic_and(i, v)       ((void)atomic_fetch_and((i), (v)))
 #define atomic_or(i, v)                ((void)atomic_fetch_or((i), (v)))
@@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int);
 #define atomic_sub_return(i, v)        (atomic_add_return(-(int)(i), (v)))
 #define atomic_fetch_sub(i, v)  (atomic_fetch_add (-(int)(i), (v)))
 
-#define atomic_inc_return(v)   (atomic_add_return(        1, (v)))
-#define atomic_dec_return(v)   (atomic_add_return(       -1, (v)))
-
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
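
The self-referential "#define atomic_fetch_add_unless atomic_fetch_add_unless"
added above lets the generic <linux/atomic.h> detect, via #ifdef, that the
architecture supplies its own implementation and suppress the generic
fallback. A sketch of the fallback it would otherwise provide (along the
lines of the generic header, not verbatim):

    #ifndef atomic_fetch_add_unless
    static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            do {
                    if (unlikely(c == u))
                            break;          /* exclusion value hit: no add */
            } while (!atomic_try_cmpxchg(v, &c, c + a));

            return c;                       /* always the pre-add value */
    }
    #endif
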
index 28db058d471b14809a3efb775489e92a0b816f12..6963482c81d842210495bceef34d181c823d4931 100644 (file)
@@ -50,38 +50,6 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_dec_return(v)   atomic_sub_return(1, v)
-#define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
-#define atomic_inc_return(v)   atomic_add_return(1, v)
-#define atomic64_inc_return(v) atomic64_add_return(1, v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic64_inc(v) atomic64_add(1, v)
-
-#define atomic_dec(v) atomic_sub(1, v)
-#define atomic64_dec(v) atomic64_sub(1, v)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 
 static inline int atomic_xchg(atomic_t *v, int new)
@@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
        return xchg(&v->counter, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
 #define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
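
With the open-coded cmpxchg loops above deleted, atomic64_add_unless() and
atomic64_inc_not_zero() now come from the generic layer, derived from the
fetch variant roughly as follows (a sketch of the generic derivation, not
the verbatim header text):

    static inline bool atomic64_add_unless(atomic64_t *v, long long a,
                                           long long u)
    {
            return atomic64_fetch_add_unless(v, a, u) != u;
    }

    #define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)
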
index 3704490b44888f170b742dd7fffdf22496b1b540..bfcaa6326c20656745ed7e2ffcfa7b4f7d1da432 100644 (file)
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_orig_tnpc;
        unsigned long kprobe_orig_tstate_pil;
-       struct pt_regs jprobe_saved_regs;
        struct prev_kprobe prev_kprobe;
 };
 
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644 (file)
index 3c17c10..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * msi.h:  Defines specific to the MBus - Sbus - Interface.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
- */
-
-#ifndef _SPARC_MSI_H
-#define _SPARC_MSI_H
-
-/*
- * Locations of MSI Registers.
- */
-#define MSI_MBUS_ARBEN 0xe0001008      /* MBus Arbiter Enable register */
-
-/*
- * Useful bits in the MSI Registers.
- */
-#define MSI_ASYNC_MODE  0x80000000     /* Operate the MSI asynchronously */
-
-
-static inline void msi_set_sync(void)
-{
-       __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
-                             "andn %%g3, %2, %%g3\n\t"
-                             "sta %%g3, [%0] %1\n\t" : :
-                             "r" (MSI_MBUS_ARBEN),
-                             "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
-}
-
-#endif /* !(_SPARC_MSI_H) */
index ab4ba43479410fed04c5bb870b50f26a41de01b5..dfbca2470536eda4de3dac29e3a485549db74972 100644 (file)
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
-               } else {
-                       if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+               } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit, no further
                         * handling of this interrupt is appropriate
                         */
-                               ret = 1;
-                               goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs))
-                               goto ss_probe;
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;
@@ -441,53 +437,6 @@ out:
        exception_exit(prev_state);
 }
 
-/* Jprobes support.  */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
-       regs->tpc  = (unsigned long) jp->entry;
-       regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
-       regs->tstate |= TSTATE_PIL;
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       register unsigned long orig_fp asm("g1");
-
-       orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
-       __asm__ __volatile__("\n"
-"1:    cmp             %%sp, %0\n\t"
-       "blu,a,pt       %%xcc, 1b\n\t"
-       " restore\n\t"
-       ".globl         jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
-       "ta             0x70"
-       : /* no outputs */
-       : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       u32 *addr = (u32 *) regs->tpc;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (addr == (u32 *) jprobe_return_trap_instruction) {
-               memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
 /* The value stored in the return address register is actually 2
  * instructions before where the callee will return to.
  * Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
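
The reset_current_kprobe()/preempt_enable_no_resched() pair dropped here is
not lost: as the kprobe_handler() hunk above shows, the generic entry path
now performs that cleanup itself whenever a pre_handler returns 1. The
post-jprobes convention, condensed from the hunks above (illustrative):

    if (p->pre_handler && p->pre_handler(p, regs)) {
            /* handler consumed the event and fixed up regs itself */
            reset_current_kprobe();
            preempt_enable_no_resched();
            return 1;       /* skip single-stepping */
    }
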
index 2ef8cfa9677ed5b034640be15933e0639026ad49..f0eba72aa1ad692c124b608f5314e8a0ef96f3e1 100644 (file)
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
        }
 }
 
-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
 {
        unsigned long freq, quotient, tick;
 
index 465a901a0ada71aef7ac08e36c90b7f528ae809d..281fa634bb1a80e023b4b6c2a68b5e225fd80868 100644 (file)
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
 
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int ret;
        unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
 }
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
 
 /* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)
index 1d70c3f6d9868d3aee729263dc30493c354720b4..be9cb006517924284e50e66f1faf6bf1c56f5494 100644 (file)
@@ -37,7 +37,6 @@
 #include <asm/mbus.h>
 #include <asm/page.h>
 #include <asm/asi.h>
-#include <asm/msi.h>
 #include <asm/smp.h>
 #include <asm/io.h>
 
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
        set_pte((pte_t *)ctxp, pte);
 }
 
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN 0xe0001008      /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE  0x80000000     /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+       __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+                             "andn %%g3, %2, %%g3\n\t"
+                             "sta %%g3, [%0] %1\n\t" : :
+                             "r" (MSI_MBUS_ARBEN),
+                             "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
index f1dbb4ee19d781751ac22f233ec7dea5f9a66ed3..6d4774f203d00f7e127e9f5a95b9db42122aa5da 100644 (file)
@@ -63,7 +63,7 @@ config X86
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
-       select ARCH_HAS_UACCESS_MCSAFE          if X86_64
+       select ARCH_HAS_UACCESS_MCSAFE          if X86_64 && X86_MCE
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_STRICT_KERNEL_RWX
@@ -180,7 +180,7 @@ config X86
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_RCU_TABLE_FREE
        select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_RELIABLE_STACKTRACE         if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+       select HAVE_RELIABLE_STACKTRACE         if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
        select HAVE_STACKPROTECTOR              if CC_HAS_SANE_STACKPROTECTOR
        select HAVE_STACK_VALIDATION            if X86_64
        select HAVE_RSEQ
index f0a6ea22429d7384d81f81e38fb39dbfc9e720ed..7e3c07d6ad424b228b0c50f2370fdb195794b61c 100644 (file)
@@ -80,11 +80,6 @@ ifeq ($(CONFIG_X86_32),y)
         # alignment instructions.
         KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
 
-        # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
-        # a lot more stack due to the lack of sharing of stacklots:
-        KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \
-                               $(call cc-option,-fno-unit-at-a-time))
-
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include arch/x86/Makefile_32.cpu
         KBUILD_CFLAGS += $(cflags-y)
@@ -258,11 +253,6 @@ archscripts: scripts_basic
 archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
-       $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
 ###
 # Kernel objects
 
@@ -327,7 +317,6 @@ archclean:
        $(Q)rm -rf $(objtree)/arch/x86_64
        $(Q)$(MAKE) $(clean)=$(boot)
        $(Q)$(MAKE) $(clean)=arch/x86/tools
-       $(Q)$(MAKE) $(clean)=arch/x86/purgatory
 
 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
index 0d41d68131cc43bcfae0bf88da2561cce6b0129b..2e1382486e91935a509c4991099da6a6787f32ef 100644 (file)
@@ -17,6 +17,7 @@
 #define _LINUX_BITOPS_H                /* Inhibit inclusion of <linux/bitops.h> */
 
 #include <linux/types.h>
+#include <asm/asm.h>
 
 static inline bool constant_test_bit(int nr, const void *addr)
 {
@@ -28,7 +29,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
        bool v;
        const u32 *p = (const u32 *)addr;
 
-       asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+       asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
        return v;
 }
 
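
CC_SET()/CC_OUT() come from <asm/asm.h> (hence the added include) and let
the compiler consume a condition flag directly as an asm output instead of
materializing it with a SETcc instruction. Their shape is approximately
(per arch/x86/include/asm/asm.h; an approximation, not a quote):

    #ifdef __GCC_ASM_FLAG_OUTPUTS__
    # define CC_SET(c)      "\n\t/* output condition code " #c " */\n"
    # define CC_OUT(c)      "=@cc" #c
    #else
    # define CC_SET(c)      "\n\tset" #c " %[_cc_" #c "]\n"
    # define CC_OUT(c)      [_cc_ ## c] "=qm"
    #endif
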
index fa42f895fdde7ace2ab2a0f911388015cd71a0a6..169c2feda14a055472a5e81481c2a4d2bd5717b8 100644 (file)
@@ -106,9 +106,13 @@ define cmd_check_data_rel
        done
 endef
 
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD      $@
+      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-       $(call if_changed,check_data_rel)
-       $(call if_changed,ld)
+       $(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
index a8a8642d2b0b802424caf7b4f925edbce7e3284e..1458b1700fc7e4c580ea963920d199fd7a4fb625 100644 (file)
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c)         \
                                                                        \
        table = (typeof(table))sys_table;                               \
                                                                        \
-       c->runtime_services = table->runtime;                           \
-       c->boot_services = table->boottime;                             \
-       c->text_output = table->con_out;                                \
+       c->runtime_services     = table->runtime;                       \
+       c->boot_services        = table->boottime;                      \
+       c->text_output          = table->con_out;                       \
 }
 BOOT_SERVICES(32);
 BOOT_SERVICES(64);
 
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_32_t *image = __image;
-       efi_file_handle_32_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_64_t *image = __image;
-       efi_file_handle_64_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
-       if (efi_early->is64)
-               return __open_volume64(__image, __fh);
-
-       return __open_volume32(__image, __fh);
-}
-
 void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 {
        efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,23 +48,17 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 }
 
 static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 {
        struct pci_setup_rom *rom = NULL;
        efi_status_t status;
        unsigned long size;
-       uint64_t attributes, romsize;
+       uint64_t romsize;
        void *romimage;
 
-       status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-                               EfiPciIoAttributeOperationGet, 0, 0,
-                               &attributes);
-       if (status != EFI_SUCCESS)
-               return status;
-
        /*
-        * Some firmware images contain EFI function pointers at the place where the
-        * romimage and romsize fields are supposed to be. Typically the EFI
+        * Some firmware images contain EFI function pointers at the place where
+        * the romimage and romsize fields are supposed to be. Typically the EFI
         * code is mapped at high addresses, translating to an unrealistically
         * large romsize. The UEFI spec limits the size of option ROMs to 16
         * MiB so we reject any ROMs over 16 MiB in size to catch this.
@@ -140,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for rom\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
                return status;
        }
 
        memset(rom, 0, sizeof(*rom));
 
-       rom->data.type = SETUP_PCI;
-       rom->data.len = size - sizeof(struct setup_data);
-       rom->data.next = 0;
-       rom->pcilen = pci->romsize;
+       rom->data.type  = SETUP_PCI;
+       rom->data.len   = size - sizeof(struct setup_data);
+       rom->data.next  = 0;
+       rom->pcilen     = pci->romsize;
        *__rom = rom;
 
        status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -185,96 +118,6 @@ free_struct:
        return status;
 }
 
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u32 *handles = (u32 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u32);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u32 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u64 *handles = (u64 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u64);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u64 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
 /*
  * There's no way to return an informative status from this function,
  * because any analysis (and printing of error messages) needs to be
@@ -290,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
        void **pci_handle = NULL;
        efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
        unsigned long size = 0;
+       unsigned long nr_pci;
+       struct setup_data *data;
+       int i;
 
        status = efi_call_early(locate_handle,
                                EFI_LOCATE_BY_PROTOCOL,
@@ -301,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
                                        size, (void **)&pci_handle);
 
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
                        return;
                }
 
@@ -313,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
        if (status != EFI_SUCCESS)
                goto free_handle;
 
-       if (efi_early->is64)
-               setup_efi_pci64(params, pci_handle, size);
-       else
-               setup_efi_pci32(params, pci_handle, size);
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+       for (i = 0; i < nr_pci; i++) {
+               efi_pci_io_protocol_t *pci = NULL;
+               struct pci_setup_rom *rom;
+
+               status = efi_call_early(handle_protocol,
+                                       efi_is_64bit() ? ((u64 *)pci_handle)[i]
+                                                      : ((u32 *)pci_handle)[i],
+                                       &pci_proto, (void **)&pci);
+               if (status != EFI_SUCCESS || !pci)
+                       continue;
+
+               status = preserve_pci_rom_image(pci, &rom);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               if (data)
+                       data->next = (unsigned long)rom;
+               else
+                       params->hdr.setup_data = (unsigned long)rom;
+
+               data = (struct setup_data *)rom;
+       }
 
 free_handle:
        efi_call_early(free_pool, pci_handle);
@@ -347,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
                status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                        size + sizeof(struct setup_data), &new);
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table,
-                                       "Failed to alloc mem for properties\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
                        return;
                }
 
@@ -364,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
        new->next = 0;
 
        data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
-       if (!data)
+       if (!data) {
                boot_params->hdr.setup_data = (unsigned long)new;
-       else {
+       } else {
                while (data->next)
                        data = (struct setup_data *)(unsigned long)data->next;
                data->next = (unsigned long)new;
@@ -386,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
        }
 }
 
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
 static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
 {
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+       efi_status_t status;
+       u32 width, height;
+       void **uga_handle = NULL;
+       efi_uga_draw_protocol_t *uga = NULL, *first_uga;
        unsigned long nr_ugas;
-       u32 *handles = (u32 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
-       first_uga = NULL;
-       nr_ugas = size / sizeof(u32);
-       for (i = 0; i < nr_ugas; i++) {
-               efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
-               u32 w, h, depth, refresh;
-               void *pciio;
-               u32 handle = handles[i];
-
-               status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
-               if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
-
-                       /*
-                        * Once we've found a UGA supporting PCIIO,
-                        * don't bother looking any further.
-                        */
-                       if (pciio)
-                               break;
-
-                       first_uga = uga;
-               }
-       }
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&uga_handle);
+       if (status != EFI_SUCCESS)
+               return status;
 
-       return status;
-}
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               uga_proto, NULL, &size, uga_handle);
+       if (status != EFI_SUCCESS)
+               goto free_handle;
 
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
-       unsigned long nr_ugas;
-       u64 *handles = (u64 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
-       int i;
+       height = 0;
+       width = 0;
 
        first_uga = NULL;
-       nr_ugas = size / sizeof(u64);
+       nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
        for (i = 0; i < nr_ugas; i++) {
                efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
                u32 w, h, depth, refresh;
                void *pciio;
-               u64 handle = handles[i];
+               unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+                                                     : ((u32 *)uga_handle)[i];
 
                status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
+                                       uga_proto, (void **)&uga);
                if (status != EFI_SUCCESS)
                        continue;
 
+               pciio = NULL;
                efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
 
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
+               status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+                                       &w, &h, &depth, &refresh);
                if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
+                       width = w;
+                       height = h;
 
                        /*
                         * Once we've found a UGA supporting PCIIO,
@@ -473,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
                }
        }
 
-       return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
-                             unsigned long size)
-{
-       efi_status_t status;
-       u32 width, height;
-       void **uga_handle = NULL;
-
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&uga_handle);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               uga_proto, NULL, &size, uga_handle);
-       if (status != EFI_SUCCESS)
-               goto free_handle;
-
-       height = 0;
-       width = 0;
-
-       if (efi_early->is64)
-               status = setup_uga64(uga_handle, size, &width, &height);
-       else
-               status = setup_uga32(uga_handle, size, &width, &height);
-
        if (!width && !height)
                goto free_handle;
 
        /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+       si->orig_video_isVGA    = VIDEO_TYPE_EFI;
 
-       si->lfb_depth = 32;
-       si->lfb_width = width;
-       si->lfb_height = height;
+       si->lfb_depth           = 32;
+       si->lfb_width           = width;
+       si->lfb_height          = height;
 
-       si->red_size = 8;
-       si->red_pos = 16;
-       si->green_size = 8;
-       si->green_pos = 8;
-       si->blue_size = 8;
-       si->blue_pos = 0;
-       si->rsvd_size = 8;
-       si->rsvd_pos = 24;
+       si->red_size            = 8;
+       si->red_pos             = 16;
+       si->green_size          = 8;
+       si->green_pos           = 8;
+       si->blue_size           = 8;
+       si->blue_pos            = 0;
+       si->rsvd_size           = 8;
+       si->rsvd_pos            = 24;
 
 free_handle:
        efi_call_early(free_pool, uga_handle);
+
        return status;
 }
 
@@ -592,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                return NULL;
 
-       if (efi_early->is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -607,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        status = efi_low_alloc(sys_table, 0x4000, 1,
                               (unsigned long *)&boot_params);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+               efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
                return NULL;
        }
 
@@ -623,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
         * Fill out some of the header fields ourselves because the
         * EFI firmware loader doesn't load the first sector.
         */
-       hdr->root_flags = 1;
-       hdr->vid_mode = 0xffff;
-       hdr->boot_flag = 0xAA55;
+       hdr->root_flags = 1;
+       hdr->vid_mode   = 0xffff;
+       hdr->boot_flag  = 0xAA55;
 
        hdr->type_of_loader = 0x21;
 
@@ -633,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
        if (!cmdline_ptr)
                goto fail;
+
        hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
        /* Fill in upper bits of command line address, NOP on 32 bit  */
        boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -669,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
        boot_params->ext_ramdisk_size  = (u64)ramdisk_size >> 32;
 
        return boot_params;
+
 fail2:
        efi_free(sys_table, options_size, hdr->cmd_line_ptr);
 fail:
        efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
        return NULL;
 }
 
@@ -684,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
        unsigned long size;
 
        e820ext->type = SETUP_E820_EXT;
-       e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+       e820ext->len  = nr_entries * sizeof(struct boot_e820_entry);
        e820ext->next = 0;
 
        data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -698,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
                params->hdr.setup_data = (unsigned long)e820ext;
 }
 
-static efi_status_t setup_e820(struct boot_params *params,
-                              struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
 {
        struct boot_e820_entry *entry = params->e820_table;
        struct efi_info *efi = &params->efi_info;
@@ -820,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
 }
 
 struct exit_boot_struct {
-       struct boot_params *boot_params;
-       struct efi_info *efi;
-       struct setup_data *e820ext;
-       __u32 e820ext_size;
-       bool is64;
+       struct boot_params      *boot_params;
+       struct efi_info         *efi;
+       struct setup_data       *e820ext;
+       __u32                   e820ext_size;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -851,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                first = false;
        }
 
-       signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+                                  : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       p->efi->efi_systab = (unsigned long)sys_table_arg;
-       p->efi->efi_memdesc_size = *map->desc_size;
-       p->efi->efi_memdesc_version = *map->desc_ver;
-       p->efi->efi_memmap = (unsigned long)*map->map;
-       p->efi->efi_memmap_size = *map->map_size;
+       p->efi->efi_systab              = (unsigned long)sys_table_arg;
+       p->efi->efi_memdesc_size        = *map->desc_size;
+       p->efi->efi_memdesc_version     = *map->desc_ver;
+       p->efi->efi_memmap              = (unsigned long)*map->map;
+       p->efi->efi_memmap_size         = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
-       p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+       p->efi->efi_systab_hi           = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_memmap_hi           = (unsigned long)*map->map >> 32;
 #endif
 
        return EFI_SUCCESS;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
 {
        unsigned long map_sz, key, desc_size, buff_size;
        efi_memory_desc_t *mem_map;
@@ -880,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        struct efi_boot_memmap map;
        struct exit_boot_struct priv;
 
-       map.map =               &mem_map;
-       map.map_size =          &map_sz;
-       map.desc_size =         &desc_size;
-       map.desc_ver =          &desc_version;
-       map.key_ptr =           &key;
-       map.buff_size =         &buff_size;
-       priv.boot_params =      boot_params;
-       priv.efi =              &boot_params->efi_info;
-       priv.e820ext =          NULL;
-       priv.e820ext_size =     0;
-       priv.is64 =             is64;
+       map.map                 = &mem_map;
+       map.map_size            = &map_sz;
+       map.desc_size           = &desc_size;
+       map.desc_ver            = &desc_version;
+       map.key_ptr             = &key;
+       map.buff_size           = &buff_size;
+       priv.boot_params        = boot_params;
+       priv.efi                = &boot_params->efi_info;
+       priv.e820ext            = NULL;
+       priv.e820ext_size       = 0;
 
        /* Might as well exit boot services now */
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -898,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        if (status != EFI_SUCCESS)
                return status;
 
-       e820ext = priv.e820ext;
-       e820ext_size = priv.e820ext_size;
+       e820ext                 = priv.e820ext;
+       e820ext_size            = priv.e820ext_size;
+
        /* Historic? */
-       boot_params->alt_mem_k = 32 * 1024;
+       boot_params->alt_mem_k  = 32 * 1024;
 
        status = setup_e820(boot_params, e820ext, e820ext_size);
        if (status != EFI_SUCCESS)
@@ -914,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
  * On success we return a pointer to a boot_params structure, and NULL
  * on failure.
  */
-struct boot_params *efi_main(struct efi_config *c,
-                            struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
 {
        struct desc_ptr *gdt = NULL;
        efi_loaded_image_t *image;
@@ -924,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
        struct desc_struct *desc;
        void *handle;
        efi_system_table_t *_table;
-       bool is64;
 
        efi_early = c;
 
        _table = (efi_system_table_t *)(unsigned long)efi_early->table;
        handle = (void *)(unsigned long)efi_early->image_handle;
-       is64 = efi_early->is64;
 
        sys_table = _table;
 
@@ -938,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
-       if (is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -963,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                sizeof(*gdt), (void **)&gdt);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
                goto fail;
        }
 
@@ -971,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_low_alloc(sys_table, gdt->size, 8,
                           (unsigned long *)&gdt->address);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
                goto fail;
        }
 
@@ -994,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
                hdr->code32_start = bzimage_addr;
        }
 
-       status = exit_boot(boot_params, handle, is64);
+       status = exit_boot(boot_params, handle);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table, "exit_boot() failed!\n");
                goto fail;
@@ -1008,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* __KERNEL32_CS */
-               desc->limit0 = 0xffff;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-               desc->s = DESC_TYPE_CODE_DATA;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0xf;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = SEG_OP_SIZE_32BIT;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0xffff;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+               desc->s         = DESC_TYPE_CODE_DATA;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0xf;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = SEG_OP_SIZE_32BIT;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
+
                desc++;
        } else {
                /* Second entry is unused on 32-bit */
@@ -1028,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
        }
 
        /* __KERNEL_CS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+
        if (IS_ENABLED(CONFIG_X86_64)) {
                desc->l = 1;
                desc->d = 0;
@@ -1044,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
                desc->l = 0;
                desc->d = SEG_OP_SIZE_32BIT;
        }
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        /* __KERNEL_DS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
-       desc->l = 0;
-       desc->d = SEG_OP_SIZE_32BIT;
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+       desc->l         = 0;
+       desc->d         = SEG_OP_SIZE_32BIT;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* Task segment value */
-               desc->limit0 = 0x0000;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_TSS;
-               desc->s = 0;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0x0;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = 0;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0x0000;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_TSS;
+               desc->s         = 0;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0x0;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = 0;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
                desc++;
        }
 
@@ -1088,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
        return boot_params;
 fail:
        efi_printk(sys_table, "efi_main() failed!\n");
+
        return NULL;
 }
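
Most of the churn in this file follows a single pattern: the duplicated
32-bit/64-bit helpers are folded into one function that indexes
firmware-provided handle arrays based on efi_is_64bit(). Isolated as an
illustrative helper (not part of the patch):

    /* Fetch the i-th EFI handle from a firmware-provided array whose
     * element width depends on the firmware word size. */
    static unsigned long efi_handle_at(void *handles, unsigned long i)
    {
            return efi_is_64bit() ? ((u64 *)handles)[i]
                                  : ((u32 *)handles)[i];
    }
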
index e799dc5c644872e65625b9442d194470a49d663c..8297387c4676124167207dc1383467a0b221c6ff 100644 (file)
 
 #define DESC_TYPE_CODE_DATA    (1 << 0)
 
-struct efi_uga_draw_protocol_32 {
+typedef struct {
        u32 get_mode;
        u32 set_mode;
        u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
 
-struct efi_uga_draw_protocol_64 {
+typedef struct {
        u64 get_mode;
        u64 set_mode;
        u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
 
-struct efi_uga_draw_protocol {
+typedef struct {
        void *get_mode;
        void *set_mode;
        void *blt;
-};
+} efi_uga_draw_protocol_t;
 
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index b87a7582853dd34a91b8d007a9a146574a4ca674..302517929932bb52405d5396c57f084691090e5e 100644 (file)
@@ -102,7 +102,7 @@ static bool memmap_too_large;
 
 
 /* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-unsigned long long mem_limit = ULLONG_MAX;
+static unsigned long long mem_limit = ULLONG_MAX;
 
 
 enum mem_avoid_index {
@@ -215,7 +215,36 @@ static void mem_avoid_memmap(char *str)
                memmap_too_large = true;
 }
 
-static int handle_mem_memmap(void)
+/* Store the number of 1GB huge pages which users specified: */
+static unsigned long max_gb_huge_pages;
+
+static void parse_gb_huge_pages(char *param, char *val)
+{
+       static bool gbpage_sz;
+       char *p;
+
+       if (!strcmp(param, "hugepagesz")) {
+               p = val;
+               if (memparse(p, &p) != PUD_SIZE) {
+                       gbpage_sz = false;
+                       return;
+               }
+
+               if (gbpage_sz)
+                       warn("Repeatedly set hugeTLB page size of 1G!\n");
+               gbpage_sz = true;
+               return;
+       }
+
+       if (!strcmp(param, "hugepages") && gbpage_sz) {
+               p = val;
+               max_gb_huge_pages = simple_strtoull(p, &p, 0);
+               return;
+       }
+}
+
+
+static int handle_mem_options(void)
 {
        char *args = (char *)get_cmd_line_ptr();
        size_t len = strlen((char *)args);
@@ -223,7 +252,8 @@ static int handle_mem_memmap(void)
        char *param, *val;
        u64 mem_size;
 
-       if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+       if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+               !strstr(args, "hugepages"))
                return 0;
 
        tmp_cmdline = malloc(len + 1);
@@ -248,6 +278,8 @@ static int handle_mem_memmap(void)
 
                if (!strcmp(param, "memmap")) {
                        mem_avoid_memmap(val);
+               } else if (strstr(param, "hugepages")) {
+                       parse_gb_huge_pages(param, val);
                } else if (!strcmp(param, "mem")) {
                        char *p = val;
 
@@ -387,7 +419,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
        /* We don't need to set a mapping for setup_data. */
 
        /* Mark the memmap regions we need to avoid */
-       handle_mem_memmap();
+       handle_mem_options();
 
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
        /* Make sure video RAM can be used. */
@@ -466,6 +498,60 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
        }
 }
 
+/*
+ * Skip as many 1GB huge pages as possible in the passed region
+ * according to the number which users specified:
+ */
+static void
+process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
+{
+       unsigned long addr, size = 0;
+       struct mem_vector tmp;
+       int i = 0;
+
+       if (!max_gb_huge_pages) {
+               store_slot_info(region, image_size);
+               return;
+       }
+
+       addr = ALIGN(region->start, PUD_SIZE);
+       /* Did we raise the address above the passed in memory entry? */
+       if (addr < region->start + region->size)
+               size = region->size - (addr - region->start);
+
+       /* Check how many 1GB huge pages can be filtered out: */
+       while (size > PUD_SIZE && max_gb_huge_pages) {
+               size -= PUD_SIZE;
+               max_gb_huge_pages--;
+               i++;
+       }
+
+       /* No good 1GB huge pages found: */
+       if (!i) {
+               store_slot_info(region, image_size);
+               return;
+       }
+
+       /*
+        * Skip those 'i'*1GB good huge pages, and continue checking and
+        * processing the remaining head or tail part of the passed region
+        * if available.
+        */
+
+       if (addr >= region->start + image_size) {
+               tmp.start = region->start;
+               tmp.size = addr - region->start;
+               store_slot_info(&tmp, image_size);
+       }
+
+       size  = region->size - (addr - region->start) - i * PUD_SIZE;
+       if (size >= image_size) {
+               tmp.start = addr + i * PUD_SIZE;
+               tmp.size = size;
+               store_slot_info(&tmp, image_size);
+       }
+}
+
 static unsigned long slots_fetch_random(void)
 {
        unsigned long slot;
@@ -546,7 +632,7 @@ static void process_mem_region(struct mem_vector *entry,
 
                /* If nothing overlaps, store the region and return. */
                if (!mem_avoid_overlap(&region, &overlap)) {
-                       store_slot_info(&region, image_size);
+                       process_gb_huge_pages(&region, image_size);
                        return;
                }
 
@@ -556,7 +642,7 @@ static void process_mem_region(struct mem_vector *entry,
 
                        beginning.start = region.start;
                        beginning.size = overlap.start - region.start;
-                       store_slot_info(&beginning, image_size);
+                       process_gb_huge_pages(&beginning, image_size);
                }
 
                /* Return if overlap extends to or past end of region. */
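
process_gb_huge_pages() carves the 1 GiB-aligned middle out of a candidate
region so KASLR cannot place the kernel there and break up a would-be
gigantic page. A worked example with illustrative numbers:

    /*
     * image_size = 16 MiB, max_gb_huge_pages = 2,
     * region = [0x3f000000, 0x3f000000 + 0xc1000000):
     *
     *   addr = ALIGN(0x3f000000, 1 GiB)  = 0x40000000
     *   size above addr                  = 0xc0000000 (3 GiB)
     *   pages skipped: 2                 -> [0x40000000, 0xc0000000)
     *   head [0x3f000000, +16 MiB)       kept (>= image_size)
     *   tail [0xc0000000, +1 GiB)        kept (>= image_size)
     *
     * KASLR then randomizes only within the head and tail slots.
     */
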
index 8c51075452519ccf5fdeae7d6b933d687ce3bca4..9e215737149103dcb627ebc9138a9b7126b24f99 100644 (file)
@@ -1,3 +1,4 @@
+#include <asm/e820/types.h>
 #include <asm/processor.h>
 #include "pgtable.h"
 #include "../string.h"
@@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data);
 extern struct boot_params *boot_params;
 int cmdline_find_option_bool(const char *option);
 
+static unsigned long find_trampoline_placement(void)
+{
+       unsigned long bios_start, ebda_start;
+       unsigned long trampoline_start;
+       struct boot_e820_entry *entry;
+       int i;
+
+       /*
+        * Find a suitable spot for the trampoline.
+        * This code is based on reserve_bios_regions().
+        */
+
+       ebda_start = *(unsigned short *)0x40e << 4;
+       bios_start = *(unsigned short *)0x413 << 10;
+
+       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+               bios_start = BIOS_START_MAX;
+
+       if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
+               bios_start = ebda_start;
+
+       bios_start = round_down(bios_start, PAGE_SIZE);
+
+       /* Find the first usable memory region under bios_start. */
+       for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+               entry = &boot_params->e820_table[i];
+
+               /* Skip all entries above bios_start. */
+               if (bios_start <= entry->addr)
+                       continue;
+
+               /* Skip non-RAM entries. */
+               if (entry->type != E820_TYPE_RAM)
+                       continue;
+
+               /* Adjust bios_start to the end of the entry if needed. */
+               if (bios_start > entry->addr + entry->size)
+                       bios_start = entry->addr + entry->size;
+
+               /* Keep bios_start page-aligned. */
+               bios_start = round_down(bios_start, PAGE_SIZE);
+
+               /* Skip the entry if it's too small. */
+               if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+                       continue;
+
+               break;
+       }
+
+       /* Place the trampoline just below the end of low memory */
+       return bios_start - TRAMPOLINE_32BIT_SIZE;
+}
+
 struct paging_config paging_prepare(void *rmode)
 {
        struct paging_config paging_config = {};
-       unsigned long bios_start, ebda_start;
 
        /* Initialize boot_params. Required for cmdline_find_option_bool(). */
        boot_params = rmode;
@@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode)
                paging_config.l5_required = 1;
        }
 
-       /*
-        * Find a suitable spot for the trampoline.
-        * This code is based on reserve_bios_regions().
-        */
-
-       ebda_start = *(unsigned short *)0x40e << 4;
-       bios_start = *(unsigned short *)0x413 << 10;
-
-       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
-               bios_start = BIOS_START_MAX;
-
-       if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
-               bios_start = ebda_start;
-
-       /* Place the trampoline just below the end of low memory, aligned to 4k */
-       paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
-       paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
+       paging_config.trampoline_start = find_trampoline_placement();
 
        trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
 
index 16f49123d747b7a1c224b75c60d8a3af1e8e701c..c4428a176973311950429e0ba00c4ce13e9f6fe6 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/types.h>
+#include <asm/asm.h>
 #include "ctype.h"
 #include "string.h"
 
@@ -28,8 +29,8 @@
 int memcmp(const void *s1, const void *s2, size_t len)
 {
        bool diff;
-       asm("repe; cmpsb; setnz %0"
-           : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+       asm("repe; cmpsb" CC_SET(nz)
+           : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
 }
 
index 9254e0b6cc060011d63b2bfa9ec281768776b2bc..5f7e43d4f64a0c08dd32e3ba767e1726b357452d 100644 (file)
@@ -75,7 +75,7 @@
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        pxor MSG, MSG
 
        mov LEN, %r8
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
        movdqu STATE3, 0x40(STATEP)
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128_aesni_enc_tail)
 
 .macro decrypt_block a s0 s1 s2 s3 s4 i
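
The recurring "xor %r9, %r9" -> "xor %r9d, %r9d" change relies on x86-64
zero-extending every write to a 32-bit register: the 32-bit form clears all
64 bits and is the preferred zeroing idiom (never longer to encode, and one
byte shorter on the legacy registers). A user-space demonstration (assumes
GCC or Clang on x86-64):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t zero_via_32bit_alias(void)
    {
            uint64_t out;

            asm("mov %1, %%r9\n\t"
                "xor %%r9d, %%r9d\n\t"  /* clears all 64 bits of %r9 */
                "mov %%r9, %0"
                : "=r" (out)
                : "r" (~0ULL)
                : "r9");
            return out;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)zero_via_32bit_alias());
            return 0;       /* prints 0 */
    }
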
index 5de7c0d46edfc56459280519e8987e28349baee9..acd11b3bf639e0a50013014e5eaeca6b11083c28 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128_aesni_alg,
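
The rewritten checks are stricter than the old x86_match_cpu() table: besides
the AES and SSE2 instruction bits they verify that the OS enabled extended
state management before the SIMD registers may be touched. A rough userspace
analogue via GCC's <cpuid.h> (the kernel uses boot_cpu_has() and
cpu_has_xfeatures(); a fuller port would also read XCR0 with _xgetbv()):

#include <cpuid.h>

static int aesni_simd_usable(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 0;

        /* Instruction support plus OSXSAVE, i.e. the OS manages the
         * SIMD state across context switches. */
        return (edx & bit_SSE2) && (ecx & bit_AES) && (ecx & bit_OSXSAVE);
}
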
index 9263c344f2c797d847b7b7ec534803010a5c9e72..491dd61c845ce4402ae38c988b585a56057f66f4 100644 (file)
@@ -66,7 +66,7 @@
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        pxor MSG0, MSG0
        pxor MSG1, MSG1
 
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128l_aesni_enc_tail)
 
 /*
index 876e4866e63386ec0dd0231c6be35b9f94bc709c..2071c3d1ae07575143cc4d6262e92eaeef9ba560 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128l_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128l_aesni_alg,
index 1d977d515bf992c649d8890316dfca41fc511364..8870c7c5d9a4df035b84ac3d66074d802b3498ee 100644 (file)
@@ -59,7 +59,7 @@
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        pxor MSG, MSG
 
        mov LEN, %r8
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis256_aesni_enc_tail)
 
 /*
index 2b5dd3af8f4dc4c20caad2a96825576dc17fe2be..b5f2a8fd5a713ca986e2d3ef24aa1b69d421ced2 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis256_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis256_aesni_alg,
index e762ef417562ff96ba769a555a36f546ff5d7d92..9bd139569b410d9e41cff15fb36aeb366930be95 100644 (file)
@@ -258,7 +258,7 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
 .macro GCM_INIT Iv SUBKEY AAD AADLEN
        mov \AADLEN, %r11
        mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
-       xor %r11, %r11
+       xor %r11d, %r11d
        mov %r11, InLen(%arg2) # ctx_data.in_length = 0
        mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
        mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
@@ -286,7 +286,7 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        movdqu HashKey(%arg2), %xmm13
        add %arg5, InLen(%arg2)
 
-       xor %r11, %r11 # initialise the data pointer offset as zero
+       xor %r11d, %r11d # initialise the data pointer offset to zero
        PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
 
        sub %r11, %arg5         # sub partial block data used
@@ -702,7 +702,7 @@ _no_extra_mask_1_\@:
 
        # GHASH computation for the last <16 Byte block
        GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
-       xor     %rax,%rax
+       xor     %eax, %eax
 
        mov     %rax, PBlockLen(%arg2)
        jmp     _dec_done_\@
@@ -737,7 +737,7 @@ _no_extra_mask_2_\@:
 
        # GHASH computation for the last <16 Byte block
        GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
-       xor     %rax,%rax
+       xor     %eax, %eax
 
        mov     %rax, PBlockLen(%arg2)
        jmp     _encode_done_\@
index faecb1518bf8164831d4d44099c11e36fc45f411..1985ea0b551bf9bd825b05ea64be0319e4b02366 100644 (file)
@@ -463,7 +463,7 @@ _get_AAD_rest_final\@:
 
 _get_AAD_done\@:
        # initialize the data pointer offset as zero
-       xor     %r11, %r11
+       xor     %r11d, %r11d
 
        # start AES for num_initial_blocks blocks
        mov     arg5, %rax                     # rax = *Y0
@@ -1770,7 +1770,7 @@ _get_AAD_rest_final\@:
 
 _get_AAD_done\@:
        # initialize the data pointer offset as zero
-       xor     %r11, %r11
+       xor     %r11d, %r11d
 
        # start AES for num_initial_blocks blocks
        mov     arg5, %rax                     # rax = *Y0
index 37d422e77931129d06e88c30edd040bc78c3de04..de182c460f82a2fc585bcc51f4b2b1669c3aead4 100644 (file)
@@ -113,7 +113,7 @@ ENDPROC(__morus1280_update_zero)
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        vpxor MSG, MSG, MSG
 
        mov %rcx, %r8
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
        vmovdqu STATE4, (4 * 32)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_avx2_enc_tail)
 
 /*
index f111f36d26dce558ddb1e4ad5427a048011aded2..6634907d6ccdf17eb3c425337ca96bf746927ff7 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
 
-static const struct x86_cpu_id avx2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_AVX2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
-
 static int __init crypto_morus1280_avx2_module_init(void)
 {
-       if (!x86_match_cpu(avx2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_avx2_algs,
index 1fe637c7be9db5515bbaff68f935dea11d151062..da5d2905db6024c7aa2ceb1a2c5e9714522a80fa 100644 (file)
@@ -235,7 +235,7 @@ ENDPROC(__morus1280_update_zero)
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        pxor MSG_LO, MSG_LO
        pxor MSG_HI, MSG_HI
 
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
        movdqu STATE4_HI, (9 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_sse2_enc_tail)
 
 /*
index 839270aa713cab55dfebc55f9d02c53aa13c9f73..95cf857d2cbb1943ba8ce356c48416837f1655d9 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus1280_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_sse2_algs,
index 71c72a0a0862c25da3293b499f0b2994e14f9926..414db480250e1bbc1a45e619b6acb40c9f8a1f4b 100644 (file)
@@ -113,7 +113,7 @@ ENDPROC(__morus640_update_zero)
  *   %r9
  */
 __load_partial:
-       xor %r9, %r9
+       xor %r9d, %r9d
        pxor MSG, MSG
 
        mov %rcx, %r8
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
        movdqu STATE4, (4 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus640_sse2_enc_tail)
 
 /*
index 26b47e2db8d2149c64b407ee10ccd4f013fcb542..615fb7bc9a323d949d038a8496125eab0c4bc4ba 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
 
 MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus640_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus640_sse2_algs,
index 6204bd53528c65c0d4a70f05e76a60bcc476499e..613d0bfc3d840f93fb2b047479f6c5eb6c2056be 100644 (file)
@@ -96,7 +96,7 @@
        # cleanup workspace
        mov     $8, %ecx
        mov     %rsp, %rdi
-       xor     %rax, %rax
+       xor     %eax, %eax
        rep stosq
 
        mov     %rbp, %rsp              # deallocate workspace
index 92190879b228c82f4ec681aa9c07bccc0e32204a..3b2490b81918128a61f6df1807788436d4f8ceb7 100644 (file)
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(regs);
+                       rseq_handle_notify_resume(NULL, regs);
                }
 
                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
index 2582881d19ceeeb75a9a90588547914ccdefdbd0..2767c625a52cf68891b9bbfa2af1fe9a0b3dfd00 100644 (file)
@@ -65,7 +65,7 @@
 # define preempt_stop(clobbers)        DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
-# define resume_kernel         restore_all
+# define resume_kernel         restore_all_kernel
 #endif
 
 .macro TRACE_IRQS_IRET
@@ -77,6 +77,8 @@
 #endif
 .endm
 
+#define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
+
 /*
  * User gs save/restore
  *
 
 #endif /* CONFIG_X86_32_LAZY_GS */
 
-.macro SAVE_ALL pt_regs_ax=%eax
+/* Unconditionally switch to user cr3 */
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+       movl    %cr3, \scratch_reg
+       orl     $PTI_SWITCH_MASK, \scratch_reg
+       movl    \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro BUG_IF_WRONG_CR3 no_user_check=0
+#ifdef CONFIG_DEBUG_ENTRY
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       .if \no_user_check == 0
+       /* coming from usermode? */
+       testl   $SEGMENT_RPL_MASK, PT_CS(%esp)
+       jz      .Lend_\@
+       .endif
+       /* On user-cr3? */
+       movl    %cr3, %eax
+       testl   $PTI_SWITCH_MASK, %eax
+       jnz     .Lend_\@
+       /* From userspace with kernel cr3 - BUG */
+       ud2
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Switch to kernel cr3 if not already loaded and return current cr3 in
+ * \scratch_reg
+ */
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       movl    %cr3, \scratch_reg
+       /* Test if we are already on kernel CR3 */
+       testl   $PTI_SWITCH_MASK, \scratch_reg
+       jz      .Lend_\@
+       andl    $(~PTI_SWITCH_MASK), \scratch_reg
+       movl    \scratch_reg, %cr3
+       /* Return original CR3 in \scratch_reg */
+       orl     $PTI_SWITCH_MASK, \scratch_reg
+.Lend_\@:
+.endm
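
With 32-bit PTI the kernel and user copies of the page directory sit in an
adjacent pair of pages, so the two CR3 values differ only in bit PAGE_SHIFT
(bit 12), which is exactly what PTI_SWITCH_MASK selects. Reduced to C (a
sketch; the constant mirrors the macro above):

#include <stdint.h>

#define PTI_MASK (UINT32_C(1) << 12)    /* PAGE_SHIFT on x86-32 */

static inline uint32_t user_cr3(uint32_t cr3)
{
        return cr3 | PTI_MASK;          /* SWITCH_TO_USER_CR3 */
}

static inline uint32_t kernel_cr3(uint32_t cr3)
{
        return cr3 & ~PTI_MASK;         /* SWITCH_TO_KERNEL_CR3 */
}
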
+
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
        cld
        PUSH_GS
        pushl   %fs
        movl    $(__KERNEL_PERCPU), %edx
        movl    %edx, %fs
        SET_KERNEL_GS %edx
+
+       /* Switch to kernel stack if necessary */
+.if \switch_stacks > 0
+       SWITCH_TO_KERNEL_STACK
+.endif
+
+.endm
+
+.macro SAVE_ALL_NMI cr3_reg:req
+       SAVE_ALL
+
+       BUG_IF_WRONG_CR3
+
+       /*
+        * Now switch the CR3 when PTI is enabled.
+        *
+        * We can enter with either user or kernel cr3; the code stores
+        * the old cr3 in \cr3_reg and switches to the kernel cr3 if
+        * necessary.
+        */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
+
+.Lend_\@:
 .endm
 
 /*
        POP_GS_EX
 .endm
 
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
+       /*
+        * Now switch the CR3 when PTI is enabled.
+        *
+        * We enter with kernel cr3 and switch the cr3 to the value
+        * stored in \cr3_reg, which is either a user or a kernel cr3.
+        */
+       ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+       testl   $PTI_SWITCH_MASK, \cr3_reg
+       jz      .Lswitched_\@
+
+       /* User cr3 in \cr3_reg - write it to hardware cr3 */
+       movl    \cr3_reg, %cr3
+
+.Lswitched_\@:
+
+       BUG_IF_WRONG_CR3
+
+       RESTORE_REGS pop=\pop
+.endm
+
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+       ALTERNATIVE     "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+       movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
+       /*
+        * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+        * are returning to the kernel.
+        * See comments in process.c:copy_thread() for details.
+        */
+       movb    PT_OLDSS(%esp), %ah
+       movb    PT_CS(%esp), %al
+       andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+       cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
+       jne     .Lend_\@        # returning to user-space with LDT SS
+
+       /*
+        * Setup and switch to ESPFIX stack
+        *
+        * We're returning to userspace with a 16-bit stack. The CPU will not
+        * restore the high word of ESP for us on executing iret... This is an
+        * "official" bug of all the x86-compatible CPUs, which we can work
+        * around to make dosemu and wine happy. We do this by preloading the
+        * high word of ESP with the high word of the userspace ESP while
+        * compensating for the offset by changing to the ESPFIX segment with
+        * a base address that matches for the difference.
+        */
+       mov     %esp, %edx                      /* load kernel esp */
+       mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
+       mov     %dx, %ax                        /* eax: new kernel esp */
+       sub     %eax, %edx                      /* offset (low word is 0) */
+       shr     $16, %edx
+       mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
+       mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
+       pushl   $__ESPFIX_SS
+       pushl   %eax                            /* new kernel esp */
+       /*
+        * Disable interrupts, but do not irqtrace this section: we
+        * will soon execute iret and the tracer was already set to
+        * the irqstate after the IRET:
+        */
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       lss     (%esp), %esp                    /* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
+
+/*
+ * Called with pt_regs fully populated and kernel segments loaded,
+ * so we can access PER_CPU and use the integer registers.
+ *
+ * We need to be very careful here with the %esp switch, because an NMI
+ * can happen anywhere. If the NMI handler finds itself on the
+ * entry-stack, it will overwrite the task-stack and everything we
+ * copied there. So allocate the stack-frame on the task-stack and
+ * switch to it before we do any copying.
+ */
+
+#define CS_FROM_ENTRY_STACK    (1 << 31)
+#define CS_FROM_USER_CR3       (1 << 30)
+
+.macro SWITCH_TO_KERNEL_STACK
+
+       ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+       BUG_IF_WRONG_CR3
+
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+
+       /*
+        * %eax now contains the entry cr3 and we carry it forward in
+        * that register for the time this macro runs
+        */
+
+       /* Are we on the entry stack? Bail out if not! */
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+       subl    %esp, %ecx      /* ecx = (end of entry_stack) - esp */
+       cmpl    $SIZEOF_entry_stack, %ecx
+       jae     .Lend_\@
+
+       /* Load stack pointer into %esi and %edi */
+       movl    %esp, %esi
+       movl    %esi, %edi
+
+       /* Move %edi to the top of the entry stack */
+       andl    $(MASK_entry_stack), %edi
+       addl    $(SIZEOF_entry_stack), %edi
+
+       /* Load top of task-stack into %edi */
+       movl    TSS_entry2task_stack(%edi), %edi
+
+       /*
+        * Clear unused upper bits of the dword containing the word-sized CS
+        * slot in pt_regs in case hardware didn't clear it for us.
+        */
+       andl    $(0x0000ffff), PT_CS(%esp)
+
+       /* Special case - entry from kernel mode via entry stack */
+#ifdef CONFIG_VM86
+       movl    PT_EFLAGS(%esp), %ecx           # mix EFLAGS and CS
+       movb    PT_CS(%esp), %cl
+       andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+       movl    PT_CS(%esp), %ecx
+       andl    $SEGMENT_RPL_MASK, %ecx
+#endif
+       cmpl    $USER_RPL, %ecx
+       jb      .Lentry_from_kernel_\@
+
+       /* Bytes to copy */
+       movl    $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+       testl   $X86_EFLAGS_VM, PT_EFLAGS(%esi)
+       jz      .Lcopy_pt_regs_\@
+
+       /*
+        * Stack-frame contains 4 additional segment registers when
+        * coming from VM86 mode
+        */
+       addl    $(4 * 4), %ecx
+
+#endif
+.Lcopy_pt_regs_\@:
+
+       /* Allocate frame on task-stack */
+       subl    %ecx, %edi
+
+       /* Switch to task-stack */
+       movl    %edi, %esp
+
+       /*
+        * We are now on the task-stack and can safely copy over the
+        * stack-frame
+        */
+       shrl    $2, %ecx
+       cld
+       rep movsl
+
+       jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+       /*
+        * This handles the case when we enter the kernel from
+        * kernel-mode and %esp points to the entry-stack. When this
+        * happens we need to switch to the task-stack to run C code,
+        * but switch back to the entry-stack again when we approach
+        * iret and return to the interrupted code-path. This usually
+        * happens when we hit an exception while restoring user-space
+        * segment registers on the way back to user-space or when the
+        * sysenter handler runs with eflags.tf set.
+        *
+        * When we switch to the task-stack here, we can't trust the
+        * contents of the entry-stack anymore, as the exception handler
+        * might be scheduled out or moved to another CPU. Therefore we
+        * copy the complete entry-stack to the task-stack and set a
+        * marker in the iret-frame (bit 31 of the CS dword) to detect
+        * what we've done on the iret path.
+        *
+        * On the iret path we copy everything back and switch to the
+        * entry-stack, so that the interrupted kernel code-path
+        * continues on the same stack it was interrupted with.
+        *
+        * Be aware that an NMI can happen anytime in this code.
+        *
+        * %esi: Entry-Stack pointer (same as %esp)
+        * %edi: Top of the task stack
+        * %eax: CR3 on kernel entry
+        */
+
+       /* Calculate number of bytes on the entry stack in %ecx */
+       movl    %esi, %ecx
+
+       /* %ecx to the top of entry-stack */
+       andl    $(MASK_entry_stack), %ecx
+       addl    $(SIZEOF_entry_stack), %ecx
+
+       /* Number of bytes on the entry stack to %ecx */
+       sub     %esi, %ecx
+
+       /* Mark stackframe as coming from entry stack */
+       orl     $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+       /*
+        * Test the cr3 used to enter the kernel and add a marker
+        * so that we can switch back to it before iret.
+        */
+       testl   $PTI_SWITCH_MASK, %eax
+       jz      .Lcopy_pt_regs_\@
+       orl     $CS_FROM_USER_CR3, PT_CS(%esp)
+
+       /*
+        * %esi and %edi are unchanged, %ecx contains the number of
+        * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+        * the stack-frame on task-stack and copy everything over
+        */
+       jmp .Lcopy_pt_regs_\@
+
+.Lend_\@:
+.endm
+
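
Stripped of the VM86 and CR3 bookkeeping, the macro above allocates a
pt_regs-sized frame on the task stack, moves %esp there, and only then
copies. A C rendering of that core step (the frame size is illustrative):

#include <string.h>
#include <stdint.h>

struct regs_frame { uint32_t slot[17]; };       /* ~pt_regs, illustrative */

/* In the asm, %esp is switched to 'dst' *before* the rep movsl: an NMI
 * arriving while still on the entry stack would itself land on top of
 * the task stack and overwrite a frame that had not been copied yet. */
static struct regs_frame *to_task_stack(void *task_stack_top,
                                        const struct regs_frame *entry_frame)
{
        struct regs_frame *dst = (struct regs_frame *)task_stack_top - 1;

        memcpy(dst, entry_frame, sizeof(*dst)); /* the 'rep movsl' */
        return dst;                             /* new %esp */
}
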
+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. It will
+ * first calculate the size of the stack-frame to copy, depending on
+ * whether we return to VM86 mode or not. With that it uses 'rep movsl'
+ * to copy the contents of the stack over to the entry stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we switched to the entry-stack. When an NMI happens
+ * while on the entry-stack, the NMI handler will switch back to the top
+ * of the task stack, overwriting our stack-frame we are about to copy.
+ * Therefore we switch the stack only after everything is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+       ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+       /* Bytes to copy */
+       movl    $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+       testl   $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+       jz      .Lcopy_pt_regs_\@
+
+       /* Additional 4 registers to copy when returning to VM86 mode */
+       addl    $(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+       /* Initialize source and destination for movsl */
+       movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+       subl    %ecx, %edi
+       movl    %esp, %esi
+
+       /* Save future stack pointer in %ebx */
+       movl    %edi, %ebx
+
+       /* Copy over the stack-frame */
+       shrl    $2, %ecx
+       cld
+       rep movsl
+
+       /*
+        * Switch to entry-stack - needs to happen after everything is
+        * copied because the NMI handler will overwrite the task-stack
+        * when on entry-stack
+        */
+       movl    %ebx, %esp
+
+.Lend_\@:
+.endm
+
+/*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack and/or user-cr3
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+       /*
+        * Test if we entered the kernel with the entry-stack. Most
+        * likely we did not, because this code only runs on the
+        * return-to-kernel path.
+        */
+       testl   $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+       jz      .Lend_\@
+
+       /* Unlikely slow-path */
+
+       /* Clear marker from stack-frame */
+       andl    $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+       /* Copy the remaining task-stack contents to entry-stack */
+       movl    %esp, %esi
+       movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+       /* Bytes on the task-stack to ecx */
+       movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+       subl    %esi, %ecx
+
+       /* Allocate stack-frame on entry-stack */
+       subl    %ecx, %edi
+
+       /*
+        * Save the future stack pointer; we must not switch until the
+        * copy is done, otherwise the NMI handler could destroy the
+        * contents of the task-stack we are about to copy.
+        */
+       movl    %edi, %ebx
+
+       /* Do the copy */
+       shrl    $2, %ecx
+       cld
+       rep movsl
+
+       /* Safe to switch to entry-stack now */
+       movl    %ebx, %esp
+
+       /*
+        * We came from entry-stack and need to check if we also need to
+        * switch back to user cr3.
+        */
+       testl   $CS_FROM_USER_CR3, PT_CS(%esp)
+       jz      .Lend_\@
+
+       /* Clear marker from stack-frame */
+       andl    $(~CS_FROM_USER_CR3), PT_CS(%esp)
+
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+.Lend_\@:
+.endm
 /*
  * %eax: prev task
  * %edx: next task
@@ -351,9 +764,9 @@ ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
        cmpl    $0, PER_CPU_VAR(__preempt_count)
-       jnz     restore_all
+       jnz     restore_all_kernel
        testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
-       jz      restore_all
+       jz      restore_all_kernel
        call    preempt_schedule_irq
        jmp     .Lneed_resched
 END(resume_kernel)
@@ -412,7 +825,21 @@ ENTRY(xen_sysenter_target)
  * 0(%ebp) arg6
  */
 ENTRY(entry_SYSENTER_32)
-       movl    TSS_sysenter_sp0(%esp), %esp
+       /*
+        * On entry-stack with all userspace-regs live - save and
+        * restore eflags and %eax to use it as scratch-reg for the cr3
+        * switch.
+        */
+       pushfl
+       pushl   %eax
+       BUG_IF_WRONG_CR3 no_user_check=1
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+       popl    %eax
+       popfl
+
+       /* Stack empty again, switch to task stack */
+       movl    TSS_entry2task_stack(%esp), %esp
+
 .Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
        pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
@@ -421,7 +848,7 @@ ENTRY(entry_SYSENTER_32)
        pushl   $__USER_CS              /* pt_regs->cs */
        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
        pushl   %eax                    /* pt_regs->orig_ax */
-       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
+       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest, stack already switched */
 
        /*
         * SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -460,25 +887,49 @@ ENTRY(entry_SYSENTER_32)
 
 /* Opportunistic SYSEXIT */
        TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
+
+       /*
+        * Setup entry stack - we keep the pointer in %eax and do the
+        * switch after almost all user-state is restored.
+        */
+
+       /* Load entry stack pointer and allocate frame for eflags/eax */
+       movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+       subl    $(2*4), %eax
+
+       /* Copy eflags and eax to entry stack */
+       movl    PT_EFLAGS(%esp), %edi
+       movl    PT_EAX(%esp), %esi
+       movl    %edi, (%eax)
+       movl    %esi, 4(%eax)
+
+       /* Restore user registers and segments */
        movl    PT_EIP(%esp), %edx      /* pt_regs->ip */
        movl    PT_OLDESP(%esp), %ecx   /* pt_regs->sp */
 1:     mov     PT_FS(%esp), %fs
        PTGS_TO_GS
+
        popl    %ebx                    /* pt_regs->bx */
        addl    $2*4, %esp              /* skip pt_regs->cx and pt_regs->dx */
        popl    %esi                    /* pt_regs->si */
        popl    %edi                    /* pt_regs->di */
        popl    %ebp                    /* pt_regs->bp */
-       popl    %eax                    /* pt_regs->ax */
+
+       /* Switch to entry stack */
+       movl    %eax, %esp
+
+       /* Now ready to switch the cr3 */
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
 
        /*
         * Restore all flags except IF. (We restore IF separately because
         * STI gives a one-instruction window in which we won't be interrupted,
         * whereas POPF does not.)
         */
-       addl    $PT_EFLAGS-PT_DS, %esp  /* point esp at pt_regs->flags */
-       btr     $X86_EFLAGS_IF_BIT, (%esp)
+       btrl    $X86_EFLAGS_IF_BIT, (%esp)
+       BUG_IF_WRONG_CR3 no_user_check=1
        popfl
+       popl    %eax
 
        /*
         * Return back to the vDSO, which will pop ecx and edx.
@@ -532,7 +983,8 @@ ENDPROC(entry_SYSENTER_32)
 ENTRY(entry_INT80_32)
        ASM_CLAC
        pushl   %eax                    /* pt_regs->orig_ax */
-       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
+
+       SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1    /* save rest */
 
        /*
         * User mode is traced as though IRQs are on, and the interrupt gate
@@ -546,24 +998,17 @@ ENTRY(entry_INT80_32)
 
 restore_all:
        TRACE_IRQS_IRET
+       SWITCH_TO_ENTRY_STACK
 .Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
-       ALTERNATIVE     "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
-       movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
-       /*
-        * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-        * are returning to the kernel.
-        * See comments in process.c:copy_thread() for details.
-        */
-       movb    PT_OLDSS(%esp), %ah
-       movb    PT_CS(%esp), %al
-       andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-       cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
-       je .Lldt_ss                             # returning to user-space with LDT SS
-#endif
+       CHECK_AND_APPLY_ESPFIX
 .Lrestore_nocheck:
-       RESTORE_REGS 4                          # skip orig_eax/error_code
+       /* Switch back to user CR3 */
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+       BUG_IF_WRONG_CR3
+
+       /* Restore user state */
+       RESTORE_REGS pop=4                      # skip orig_eax/error_code
 .Lirq_return:
        /*
         * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -572,46 +1017,33 @@ restore_all:
         */
        INTERRUPT_RETURN
 
+restore_all_kernel:
+       TRACE_IRQS_IRET
+       PARANOID_EXIT_TO_KERNEL_MODE
+       BUG_IF_WRONG_CR3
+       RESTORE_REGS 4
+       jmp     .Lirq_return
+
 .section .fixup, "ax"
 ENTRY(iret_exc )
        pushl   $0                              # no error code
        pushl   $do_iret_error
-       jmp     common_exception
-.previous
-       _ASM_EXTABLE(.Lirq_return, iret_exc)
 
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-       mov     %esp, %edx                      /* load kernel esp */
-       mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
-       mov     %dx, %ax                        /* eax: new kernel esp */
-       sub     %eax, %edx                      /* offset (low word is 0) */
-       shr     $16, %edx
-       mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
-       mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
-       pushl   $__ESPFIX_SS
-       pushl   %eax                            /* new kernel esp */
+#ifdef CONFIG_DEBUG_ENTRY
        /*
-        * Disable interrupts, but do not irqtrace this section: we
-        * will soon execute iret and the tracer was already set to
-        * the irqstate after the IRET:
+        * The stack-frame here is the one that iret faulted on, so it's a
+        * return-to-user frame. We are on kernel-cr3 because we come here from
+        * the fixup code. This confuses the CR3 checker, so switch to user-cr3
+        * as the checker expects it.
         */
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       lss     (%esp), %esp                    /* switch to espfix segment */
-       jmp     .Lrestore_nocheck
+       pushl   %eax
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+       popl    %eax
 #endif
+
+       jmp     common_exception
+.previous
+       _ASM_EXTABLE(.Lirq_return, iret_exc)
 ENDPROC(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK
@@ -671,7 +1103,8 @@ END(irq_entries_start)
 common_interrupt:
        ASM_CLAC
        addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
-       SAVE_ALL
+
+       SAVE_ALL switch_stacks=1
        ENCODE_FRAME_POINTER
        TRACE_IRQS_OFF
        movl    %esp, %eax
@@ -679,16 +1112,16 @@ common_interrupt:
        jmp     ret_from_intr
 ENDPROC(common_interrupt)
 
-#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name)                            \
-       ASM_CLAC;                       \
-       pushl   $~(nr);                 \
-       SAVE_ALL;                       \
-       ENCODE_FRAME_POINTER;           \
-       TRACE_IRQS_OFF                  \
-       movl    %esp, %eax;             \
-       call    fn;                     \
-       jmp     ret_from_intr;          \
+#define BUILD_INTERRUPT3(name, nr, fn)                 \
+ENTRY(name)                                            \
+       ASM_CLAC;                                       \
+       pushl   $~(nr);                                 \
+       SAVE_ALL switch_stacks=1;                       \
+       ENCODE_FRAME_POINTER;                           \
+       TRACE_IRQS_OFF                                  \
+       movl    %esp, %eax;                             \
+       call    fn;                                     \
+       jmp     ret_from_intr;                          \
 ENDPROC(name)
 
 #define BUILD_INTERRUPT(name, nr)              \
@@ -920,16 +1353,20 @@ common_exception:
        pushl   %es
        pushl   %ds
        pushl   %eax
+       movl    $(__USER_DS), %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    $(__KERNEL_PERCPU), %eax
+       movl    %eax, %fs
        pushl   %ebp
        pushl   %edi
        pushl   %esi
        pushl   %edx
        pushl   %ecx
        pushl   %ebx
+       SWITCH_TO_KERNEL_STACK
        ENCODE_FRAME_POINTER
        cld
-       movl    $(__KERNEL_PERCPU), %ecx
-       movl    %ecx, %fs
        UNWIND_ESPFIX_STACK
        GS_TO_REG %ecx
        movl    PT_GS(%esp), %edi               # get the function address
@@ -937,9 +1374,6 @@ common_exception:
        movl    $-1, PT_ORIG_EAX(%esp)          # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
-       movl    $(__USER_DS), %ecx
-       movl    %ecx, %ds
-       movl    %ecx, %es
        TRACE_IRQS_OFF
        movl    %esp, %eax                      # pt_regs pointer
        CALL_NOSPEC %edi
@@ -948,40 +1382,12 @@ END(common_exception)
 
 ENTRY(debug)
        /*
-        * #DB can happen at the first instruction of
-        * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
-        * happens, then we will be running on a very small stack.  We
-        * need to detect this condition and switch to the thread
-        * stack before calling any C code at all.
-        *
-        * If you edit this code, keep in mind that NMIs can happen in here.
+        * Entry from sysenter is now handled in common_exception
         */
        ASM_CLAC
        pushl   $-1                             # mark this as an int
-       SAVE_ALL
-       ENCODE_FRAME_POINTER
-       xorl    %edx, %edx                      # error code 0
-       movl    %esp, %eax                      # pt_regs pointer
-
-       /* Are we currently on the SYSENTER stack? */
-       movl    PER_CPU_VAR(cpu_entry_area), %ecx
-       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
-       subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
-       cmpl    $SIZEOF_entry_stack, %ecx
-       jb      .Ldebug_from_sysenter_stack
-
-       TRACE_IRQS_OFF
-       call    do_debug
-       jmp     ret_from_exception
-
-.Ldebug_from_sysenter_stack:
-       /* We're on the SYSENTER stack.  Switch off. */
-       movl    %esp, %ebx
-       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
-       TRACE_IRQS_OFF
-       call    do_debug
-       movl    %ebx, %esp
-       jmp     ret_from_exception
+       pushl   $do_debug
+       jmp     common_exception
 END(debug)
 
 /*
@@ -993,6 +1399,7 @@ END(debug)
  */
 ENTRY(nmi)
        ASM_CLAC
+
 #ifdef CONFIG_X86_ESPFIX32
        pushl   %eax
        movl    %ss, %eax
@@ -1002,7 +1409,7 @@ ENTRY(nmi)
 #endif
 
        pushl   %eax                            # pt_regs->orig_ax
-       SAVE_ALL
+       SAVE_ALL_NMI cr3_reg=%edi
        ENCODE_FRAME_POINTER
        xorl    %edx, %edx                      # zero error code
        movl    %esp, %eax                      # pt_regs pointer
@@ -1016,7 +1423,7 @@ ENTRY(nmi)
 
        /* Not on SYSENTER stack. */
        call    do_nmi
-       jmp     .Lrestore_all_notrace
+       jmp     .Lnmi_return
 
 .Lnmi_from_sysenter_stack:
        /*
@@ -1027,7 +1434,11 @@ ENTRY(nmi)
        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
        call    do_nmi
        movl    %ebx, %esp
-       jmp     .Lrestore_all_notrace
+
+.Lnmi_return:
+       CHECK_AND_APPLY_ESPFIX
+       RESTORE_ALL_NMI cr3_reg=%edi pop=4
+       jmp     .Lirq_return
 
 #ifdef CONFIG_X86_ESPFIX32
 .Lnmi_espfix_stack:
@@ -1042,12 +1453,12 @@ ENTRY(nmi)
        pushl   16(%esp)
        .endr
        pushl   %eax
-       SAVE_ALL
+       SAVE_ALL_NMI cr3_reg=%edi
        ENCODE_FRAME_POINTER
        FIXUP_ESPFIX_STACK                      # %eax == %esp
        xorl    %edx, %edx                      # zero error code
        call    do_nmi
-       RESTORE_REGS
+       RESTORE_ALL_NMI cr3_reg=%edi
        lss     12+4(%esp), %esp                # back to espfix stack
        jmp     .Lirq_return
 #endif
@@ -1056,7 +1467,8 @@ END(nmi)
 ENTRY(int3)
        ASM_CLAC
        pushl   $-1                             # mark this as an int
-       SAVE_ALL
+
+       SAVE_ALL switch_stacks=1
        ENCODE_FRAME_POINTER
        TRACE_IRQS_OFF
        xorl    %edx, %edx                      # zero error code
index 73a522d53b5376b7eaa17cb3d0a7e7be317b02d1..957dfb693eccd5152700ec87ab3811b8cfb93e56 100644 (file)
@@ -92,7 +92,7 @@ END(native_usergs_sysret64)
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG
-       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       btl     $9, EFLAGS(%rsp)                /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON_DEBUG
 1:
@@ -408,6 +408,7 @@ ENTRY(ret_from_fork)
 
 1:
        /* kernel thread */
+       UNWIND_HINT_EMPTY
        movq    %r12, %rdi
        CALL_NOSPEC %rbx
        /*
@@ -701,7 +702,7 @@ retint_kernel:
 #ifdef CONFIG_PREEMPT
        /* Interrupts are off */
        /* Check if we need preemption */
-       bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
+       btl     $9, EFLAGS(%rsp)                /* were interrupts off? */
        jnc     1f
 0:     cmpl    $0, PER_CPU_VAR(__preempt_count)
        jnz     1f
@@ -981,7 +982,7 @@ ENTRY(\sym)
 
        call    \do_sym
 
-       jmp     error_exit                      /* %ebx: no swapgs flag */
+       jmp     error_exit
        .endif
 END(\sym)
 .endm
@@ -1222,7 +1223,6 @@ END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
        UNWIND_HINT_FUNC
@@ -1269,7 +1269,6 @@ ENTRY(error_entry)
         * for these here too.
         */
 .Lerror_kernelspace:
-       incl    %ebx
        leaq    native_irq_return_iret(%rip), %rcx
        cmpq    %rcx, RIP+8(%rsp)
        je      .Lerror_bad_iret
@@ -1303,28 +1302,20 @@ ENTRY(error_entry)
 
        /*
         * Pretend that the exception came from user mode: set up pt_regs
-        * as if we faulted immediately after IRET and clear EBX so that
-        * error_exit knows that we will be returning to user mode.
+        * as if we faulted immediately after IRET.
         */
        mov     %rsp, %rdi
        call    fixup_bad_iret
        mov     %rax, %rsp
-       decl    %ebx
        jmp     .Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- *   1: already in kernel mode, don't need SWAPGS
- *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
 ENTRY(error_exit)
        UNWIND_HINT_REGS
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
-       testl   %ebx, %ebx
-       jnz     retint_kernel
+       testb   $3, CS(%rsp)
+       jz      retint_kernel
        jmp     retint_user
 END(error_exit)
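
The removed EBX protocol is replaced by deriving the answer from the frame
itself: the low two bits of the saved CS are the selector's privilege level,
nonzero exactly when the exception came from ring 3. As a one-line helper
(a sketch, not a kernel function):

#include <stdint.h>

static inline int exception_from_user(uint64_t saved_cs)
{
        return (saved_cs & 3) != 0;     /* RPL != 0 => user mode */
}
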
 
index 9de7f1e1dede7f6e6ebdc66e5f63756a173cdc0a..7d0df78db727296d1c4451e3a930033669f47aa3 100644 (file)
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   %r8                     /* pt_regs->r8 */
+       pushq   $0                      /* pt_regs->r8  = 0 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   %r9                     /* pt_regs->r9 */
+       pushq   $0                      /* pt_regs->r9  = 0 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   %r10                    /* pt_regs->r10 */
+       pushq   $0                      /* pt_regs->r10 = 0 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   %r11                    /* pt_regs->r11 */
+       pushq   $0                      /* pt_regs->r11 = 0 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        xorl    %ecx, %ecx              /* nospec   cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r8 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   $0                      /* pt_regs->r9  = 0 */
+       pushq   %r9                     /* pt_regs->r9 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   $0                      /* pt_regs->r10 = 0 */
+       pushq   %r10                    /* pt_regs->r10 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   $0                      /* pt_regs->r11 = 0 */
+       pushq   %r11                    /* pt_regs->r11 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
index 261802b1cc505d19c6150fbf644450afad49e16f..9f695f517747e409868226b0c7aea4afb29215ec 100644 (file)
@@ -46,10 +46,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 
 CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-                       -Wl,--no-undefined \
-                       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
-                       $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+                       -z max-page-size=4096 -z common-page-size=4096
 
 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
@@ -58,9 +56,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(src
 hostprogs-y                    += vdso2c
 
 quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-       $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+      cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
@@ -95,10 +91,8 @@ CFLAGS_REMOVE_vvar.o = -pg
 #
 
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
-                          -Wl,-soname=linux-vdso.so.1 \
-                          -Wl,-z,max-page-size=4096 \
-                          -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+                          -z max-page-size=4096 -z common-page-size=4096
 
 # x32-rebranded versions
 vobjx32s-y := $(vobjs-y:.o=-x32.o)
@@ -123,7 +117,7 @@ $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
        $(call if_changed,vdso)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
 
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
@@ -157,13 +151,13 @@ $(obj)/vdso32.so.dbg: FORCE \
 # The DSO images are built using a special linker script.
 #
 quiet_cmd_vdso = VDSO    $@
-      cmd_vdso = $(CC) -nostdlib -o $@ \
+      cmd_vdso = $(LD) -nostdlib -o $@ \
                       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+                      -T $(filter %.lds,$^) $(filter %.o,$^) && \
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
-       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+       $(call ld-option, --build-id) -Bsymbolic
 GCOV_PROFILE := n
 
 #
index 4b98101209a1877c5580a5fdfc7beecfd576f124..d50bb4dc065036181f7fc3b05182c7c8dce6b8ec 100644 (file)
@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 {
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
-       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event *hwc;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
@@ -602,6 +602,10 @@ fail:
                return 0;
        }
 
+       if (WARN_ON_ONCE(!event))
+               goto fail;
+
+       hwc = &event->hw;
        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
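
The fix moves the NULL check ahead of the first use of the pointer: the old
code formed &event->hw up front, which does not fault by itself but sets up a
NULL dereference a few lines later. The shape of the reordering, reduced to a
sketch with illustrative types:

#include <stddef.h>

struct hw_state { unsigned long config_base; };
struct sample_event { struct hw_state hw; };

static int handle_irq(struct sample_event *event)
{
        struct hw_state *hwc;           /* no longer initialized up front */

        if (!event)                     /* validate before deriving hwc */
                return 0;

        hwc = &event->hw;
        return hwc->config_base != 0;
}
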
index 707b2a96e516b5579c9321cbf0822a627bc8a8d8..035c37481f572a253b08773df44069ab1590de91 100644 (file)
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
+       if (unlikely(event->attr.precise_ip))
+               intel_pmu_pebs_disable(event);
+
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }
 
        x86_pmu_disable_event(event);
-
-       if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
                x86_perf_event_update(event);
 }
 
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
-       u64 ctrl_val, bits, mask;
+       u64 ctrl_val, mask, bits = 0;
 
        /*
-        * Enable IRQ generation (0x8),
+        * Enable IRQ generation (0x8), if not PEBS,
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
-       bits = 0x8ULL;
+       if (!event->attr.precise_ip)
+               bits |= 0x8;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
        if (unlikely(event_is_checkpointed(event)))
                cpuc->intel_cp_status |= (1ull << hwc->idx);
 
+       if (unlikely(event->attr.precise_ip))
+               intel_pmu_pebs_enable(event);
+
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_enable_fixed(hwc);
+               intel_pmu_enable_fixed(event);
                return;
        }
 
-       if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_enable(event);
-
        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
@@ -2280,7 +2282,10 @@ again:
         * counters from the GLOBAL_STATUS mask and we always process PEBS
         * events via drain_pebs().
         */
-       status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+       if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+               status &= ~cpuc->pebs_enabled;
+       else
+               status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
 
        /*
         * PEBS overflow sets bit 62 in the global status register
@@ -2997,6 +3002,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
                }
                if (x86_pmu.pebs_aliases)
                        x86_pmu.pebs_aliases(event);
+
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
        }
 
        if (needs_branch_stack(event)) {
@@ -4069,7 +4077,6 @@ __init int intel_pmu_init(void)
                intel_pmu_lbr_init_skl();
 
                x86_pmu.event_constraints = intel_slm_event_constraints;
-               x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
                x86_pmu.extra_regs = intel_glm_extra_regs;
                /*
                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
@@ -4079,6 +4086,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.lbr_pt_coexist = true;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_PEBS_ALL;
                x86_pmu.get_event_constraints = glp_get_event_constraints;
                x86_pmu.cpu_events = glm_events_attrs;
                /* Goldmont Plus has 4-wide pipeline */
index 8a10a045b57bde1345c542b8bbe2a19920d2400b..b7b01d762d32a3a6a30f4e2bd640aeae23dd7a3f 100644 (file)
@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
        ds->bts_buffer_base = (unsigned long) cea;
        ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
        ds->bts_index = ds->bts_buffer_base;
-       max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
-       ds->bts_absolute_maximum = ds->bts_buffer_base + max;
-       ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
+       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+       ds->bts_absolute_maximum = ds->bts_buffer_base +
+                                       max * BTS_RECORD_SIZE;
+       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+                                       (max / 16) * BTS_RECORD_SIZE;
        return 0;
 }
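
The recomputation changes 'max' from a byte count to a record count so the
interrupt threshold lands on a whole-record boundary: previously max/16 was a
sixteenth of the buffer in bytes, which need not be a multiple of the 24-byte
BTS record. The corrected math as a sketch (buffer size illustrative):

#define BTS_BUFFER_SIZE (4096 << 4)     /* illustrative: 16 pages */
#define BTS_RECORD_SIZE 24

static unsigned long bts_threshold(unsigned long base)
{
        unsigned long max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        unsigned long absolute_maximum = base + max * BTS_RECORD_SIZE;

        /* An integral number of records below the maximum. */
        return absolute_maximum - (max / 16) * BTS_RECORD_SIZE;
}
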
 
@@ -711,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
-struct event_constraint intel_glp_pebs_event_constraints[] = {
-       /* Allow all events as PEBS with no flags */
-       INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
-       EVENT_CONSTRAINT_END
-};
-
 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
        INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
        INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
@@ -869,6 +865,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
                }
        }
 
+       /*
+        * Extended PEBS support
+        * Makes the PEBS code search the normal constraints.
+        */
+       if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+               return NULL;
+
        return &emptyconstraint;
 }
 
@@ -894,10 +897,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
        struct debug_store *ds = cpuc->ds;
        u64 threshold;
+       int reserved;
+
+       if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+               reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+       else
+               reserved = x86_pmu.max_pebs_events;
 
        if (cpuc->n_pebs == cpuc->n_large_pebs) {
                threshold = ds->pebs_absolute_maximum -
-                       x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+                       reserved * x86_pmu.pebs_record_size;
        } else {
                threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
        }
@@ -961,7 +970,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
         * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
         */
        if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
-               ds->pebs_event_reset[hwc->idx] =
+               unsigned int idx = hwc->idx;
+
+               if (idx >= INTEL_PMC_IDX_FIXED)
+                       idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+               ds->pebs_event_reset[idx] =
                        (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
        } else {
                ds->pebs_event_reset[hwc->idx] = 0;
@@ -1183,17 +1196,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
                data->data_src.val = val;
        }
 
+       /*
+        * We must however always use iregs for the unwinder to stay sane; the
+        * record BP,SP,IP can point into thin air when the record is from a
+        * previous PMI context or an (I)RET happened between the record and
+        * PMI.
+        */
+       if (sample_type & PERF_SAMPLE_CALLCHAIN)
+               data->callchain = perf_callchain(event, iregs);
+
        /*
         * We use the interrupt regs as a base because the PEBS record does not
         * contain a full regs set, specifically it seems to lack segment
         * descriptors, which get used by things like user_mode().
         *
         * In the simple case fix up only the IP for PERF_SAMPLE_IP.
-        *
-        * We must however always use BP,SP from iregs for the unwinder to stay
-        * sane; the record BP,SP can point into thin air when the record is
-        * from a previous PMI context or an (I)RET happend between the record
-        * and PMI.
         */
        *regs = *iregs;
 
@@ -1212,15 +1229,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
                regs->si = pebs->si;
                regs->di = pebs->di;
 
-               /*
-                * Per the above; only set BP,SP if we don't need callchains.
-                *
-                * XXX: does this make sense?
-                */
-               if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-                       regs->bp = pebs->bp;
-                       regs->sp = pebs->sp;
-               }
+               regs->bp = pebs->bp;
+               regs->sp = pebs->sp;
 
 #ifndef CONFIG_X86_32
                regs->r8 = pebs->r8;
@@ -1482,9 +1492,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
        struct debug_store *ds = cpuc->ds;
        struct perf_event *event;
        void *base, *at, *top;
-       short counts[MAX_PEBS_EVENTS] = {};
-       short error[MAX_PEBS_EVENTS] = {};
-       int bit, i;
+       short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+       short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+       int bit, i, size;
+       u64 mask;
 
        if (!x86_pmu.pebs_active)
                return;
@@ -1494,6 +1505,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 
        ds->pebs_index = ds->pebs_buffer_base;
 
+       mask = (1ULL << x86_pmu.max_pebs_events) - 1;
+       size = x86_pmu.max_pebs_events;
+       if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
+               mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
+               size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+       }
+
        if (unlikely(base >= top)) {
                /*
                 * The drain_pebs() could be called twice in a short period
@@ -1503,7 +1521,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                 * update the event->count for this case.
                 */
                for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
-                                x86_pmu.max_pebs_events) {
+                                size) {
                        event = cpuc->events[bit];
                        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
                                intel_pmu_save_and_restart_reload(event, 0);
@@ -1516,12 +1534,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                u64 pebs_status;
 
                pebs_status = p->status & cpuc->pebs_enabled;
-               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+               pebs_status &= mask;
 
                /* PEBS v3 has more accurate status bits */
                if (x86_pmu.intel_cap.pebs_format >= 3) {
                        for_each_set_bit(bit, (unsigned long *)&pebs_status,
-                                        x86_pmu.max_pebs_events)
+                                        size)
                                counts[bit]++;
 
                        continue;
@@ -1569,7 +1587,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                counts[bit]++;
        }
 
-       for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+       for (bit = 0; bit < size; bit++) {
                if ((counts[bit] == 0) && (error[bit] == 0))
                        continue;
 
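The mask/size widening in the drain loop above is the whole of the extended-PEBS bookkeeping, so it is worth seeing in isolation. Below is a minimal standalone C sketch of the same bit layout: general-purpose counters in the low bits, fixed counters starting at bit 32 (INTEL_PMC_IDX_FIXED). The per-CPU counter counts are made up for illustration; only the two constants mirror the kernel's values.

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32  /* fixed counters start at this status bit */

int main(void)
{
        unsigned int max_pebs_events = 4;       /* assumed: 4 GP counters do PEBS */
        unsigned int num_counters_fixed = 3;    /* assumed: 3 fixed counters */
        int pebs_all = 1;                       /* as if PMU_FL_PEBS_ALL were set */
        uint64_t mask, pebs_status;
        int bit, size;

        mask = (1ULL << max_pebs_events) - 1;   /* GP counters: bits 0..3 */
        size = max_pebs_events;
        if (pebs_all) {
                /* fixed counters occupy bits 32..34 of the status word */
                mask |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
                size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
        }

        /* pretend one record hit GP counter 1 and fixed counter 0 */
        pebs_status = (1ULL << 1) | (1ULL << INTEL_PMC_IDX_FIXED);
        pebs_status &= mask;

        for (bit = 0; bit < size; bit++)
                if (pebs_status & (1ULL << bit))
                        printf("counter bit %d produced this record\n", bit);
        return 0;
}

Without PMU_FL_PEBS_ALL the mask and the loops stop at max_pebs_events exactly as before; the flag only widens the window to cover the fixed-counter bits.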
index cf372b90557ed4e8a788c8f97b515ac956d2512e..f3e006bed9a75f94aa8bb9aa388d3037a2d71cbe 100644 (file)
@@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void)
 
 void intel_pmu_lbr_reset(void)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
        if (!x86_pmu.lbr_nr)
                return;
 
@@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();
+
+       cpuc->last_task_ctx = NULL;
+       cpuc->last_log_id = 0;
 }
 
 /*
@@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx)
 
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;
        unsigned lbr_idx, mask;
        u64 tos;
@@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                return;
        }
 
-       mask = x86_pmu.lbr_nr - 1;
        tos = task_ctx->tos;
-       for (i = 0; i < tos; i++) {
+       /*
+        * Do not restore the LBR registers if:
+        * - no one else touched them, and
+        * - the CPU did not enter C6.
+        */
+       if ((task_ctx == cpuc->last_task_ctx) &&
+           (task_ctx->log_id == cpuc->last_log_id) &&
+           rdlbr_from(tos)) {
+               task_ctx->lbr_stack_state = LBR_NONE;
+               return;
+       }
+
+       mask = x86_pmu.lbr_nr - 1;
+       for (i = 0; i < task_ctx->valid_lbrs; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
                wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+
+       for (; i < x86_pmu.lbr_nr; i++) {
+               lbr_idx = (tos - i) & mask;
+               wrlbr_from(lbr_idx, 0);
+               wrlbr_to(lbr_idx, 0);
+               if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+                       wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+       }
+
        wrmsrl(x86_pmu.lbr_tos, tos);
        task_ctx->lbr_stack_state = LBR_NONE;
 }
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned lbr_idx, mask;
-       u64 tos;
+       u64 tos, from;
        int i;
 
        if (task_ctx->lbr_callstack_users == 0) {
@@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
-       for (i = 0; i < tos; i++) {
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
-               task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+               from = rdlbr_from(lbr_idx);
+               if (!from)
+                       break;
+               task_ctx->lbr_from[i] = from;
                task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+       task_ctx->valid_lbrs = i;
        task_ctx->tos = tos;
        task_ctx->lbr_stack_state = LBR_VALID;
+
+       cpuc->last_task_ctx = task_ctx;
+       cpuc->last_log_id = ++task_ctx->log_id;
 }
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
@@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
  */
 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 {
-       bool need_info = false;
+       bool need_info = false, call_stack = false;
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
@@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
        if (cpuc->lbr_sel) {
                need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
                if (cpuc->lbr_sel->config & LBR_CALL_STACK)
-                       num = tos;
+                       call_stack = true;
        }
 
        for (i = 0; i < num; i++) {
@@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
                from = rdlbr_from(lbr_idx);
                to   = rdlbr_to(lbr_idx);
 
+               /*
+                * Read LBR call stack entries until an
+                * invalid (all-zero) entry is detected.
+                */
+               if (call_stack && !from)
+                       break;
+
                if (lbr_format == LBR_FORMAT_INFO && need_info) {
                        u64 info;
 
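The last_task_ctx/last_log_id pair threaded through the hunks above implements a small generation-counter scheme: if the context being scheduled back in is the very one last saved on this CPU, and the hardware stack was not wiped in between (the 'from' entry at TOS is still non-zero), the MSR writes can be skipped entirely. A minimal standalone model of that idea follows; the types are hypothetical and comments stand in for the real MSR traffic.

#include <stdbool.h>
#include <stdio.h>

struct lbr_ctx {
        int log_id;                     /* bumped on every save */
        unsigned long from[8];          /* stand-in for the saved LBR entries */
};

static struct lbr_ctx *last_ctx;        /* per-CPU in the kernel */
static int last_log_id;

static void save(struct lbr_ctx *ctx)
{
        /* ... copy LBR MSRs into ctx->from until a zero entry ... */
        last_ctx = ctx;
        last_log_id = ++ctx->log_id;
}

static bool restore(struct lbr_ctx *ctx, bool hw_still_valid)
{
        /*
         * Same context, nothing saved over it since, and the hardware
         * was not wiped (e.g. by C6): the registers already hold the
         * right contents, so skip all MSR writes.
         */
        if (ctx == last_ctx && ctx->log_id == last_log_id && hw_still_valid)
                return false;

        /* ... write ctx->from back and zero the stale tail ... */
        return true;
}

int main(void)
{
        struct lbr_ctx a = { 0 };

        save(&a);
        printf("restore wrote MSRs: %d\n", restore(&a, true));  /* 0: skipped */
        printf("restore wrote MSRs: %d\n", restore(&a, false)); /* 1: wiped */
        return 0;
}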
index c9e1e0bef3c36d0dbfbca99b0cf257e33bfb17cd..e17ab885b1e928d17a671eb96f12cf21905bdb93 100644 (file)
@@ -28,7 +28,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       3
+#define UNCORE_EXTRA_PCI_DEV_MAX       4
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
 
index 87dc0263a2e1e97bd210d1296a82a2330933f558..51d7c117e3c705f82136f422c553164f72e83c8c 100644 (file)
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       BDX_PCI_QPI_PORT2_FILTER,
        HSWEP_PCI_PCU_3,
 };
 
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* QPI Port 2 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  BDX_PCI_QPI_PORT2_FILTER),
        },
        { /* PCU.3 (for Capability registers) */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
index 9f3711470ec165b569cb345d0f17d5152d6bd733..156286335351a43b6692ab07747e97e1fb97b3b2 100644 (file)
@@ -163,6 +163,7 @@ struct intel_excl_cntrs {
        unsigned        core_id;        /* per-core: core id */
 };
 
+struct x86_perf_task_context;
 #define MAX_LBR_ENTRIES                32
 
 enum {
@@ -214,6 +215,8 @@ struct cpu_hw_events {
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;
+       struct x86_perf_task_context    *last_task_ctx;
+       int                             last_log_id;
 
        /*
         * Intel host/guest exclude bits
@@ -648,8 +651,10 @@ struct x86_perf_task_context {
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
+       int valid_lbrs;
        int lbr_callstack_users;
        int lbr_stack_state;
+       int log_id;
 };
 
 #define x86_add_quirk(func_)                                           \
@@ -668,6 +673,7 @@ do {                                                                        \
 #define PMU_FL_HAS_RSP_1       0x2 /* has 2 equivalent offcore_rsp regs   */
 #define PMU_FL_EXCL_CNTRS      0x4 /* has exclusive counter requirements  */
 #define PMU_FL_EXCL_ENABLED    0x8 /* exclusive counter active */
+#define PMU_FL_PEBS_ALL                0x10 /* all events are valid PEBS events */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
index f68855499391f4637532fa0de4c95ca844e6563e..5b0f613428c252291379e9bbb7cc664c929ce07f 100644 (file)
@@ -31,6 +31,8 @@
 #include <asm/mshyperv.h>
 #include <asm/apic.h>
 
+#include <asm/trace/hyperv.h>
+
 static struct apic orig_apic;
 
 static u64 hv_apic_icr_read(void)
@@ -99,6 +101,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
        int nr_bank = 0;
        int ret = 1;
 
+       if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+               return false;
+
        local_irq_save(flags);
        arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
 
@@ -114,6 +119,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
                nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
        }
+       if (nr_bank < 0)
+               goto ipi_mask_ex_done;
        if (!nr_bank)
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
 
@@ -128,10 +135,10 @@ ipi_mask_ex_done:
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
        int cur_cpu, vcpu;
-       struct ipi_arg_non_ex **arg;
-       struct ipi_arg_non_ex *ipi_arg;
+       struct ipi_arg_non_ex ipi_arg;
        int ret = 1;
-       unsigned long flags;
+
+       trace_hyperv_send_ipi_mask(mask, vector);
 
        if (cpumask_empty(mask))
                return true;
@@ -142,37 +149,43 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return false;
 
-       if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-               return __send_ipi_mask_ex(mask, vector);
-
-       local_irq_save(flags);
-       arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
-       ipi_arg = *arg;
-       if (unlikely(!ipi_arg))
-               goto ipi_mask_done;
-
-       ipi_arg->vector = vector;
-       ipi_arg->reserved = 0;
-       ipi_arg->cpu_mask = 0;
+       /*
+        * From the supplied CPU set we need to figure out if we can get away
+        * with the cheaper HVCALL_SEND_IPI hypercall. This is possible when
+        * the highest VP number in the set is < 64. As VP numbers are usually
+        * in ascending order and match Linux CPU ids, here is an optimization:
+        * we check the VP number for the highest bit in the supplied set first
+        * so we can quickly find out if using the HVCALL_SEND_IPI_EX hypercall
+        * is a must. We will also check all VP numbers when walking the
+        * supplied CPU set to remain correct in all cases.
+        */
+       if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
+               goto do_ex_hypercall;
+
+       ipi_arg.vector = vector;
+       ipi_arg.cpu_mask = 0;
 
        for_each_cpu(cur_cpu, mask) {
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+               if (vcpu == VP_INVAL)
+                       return false;
+
                /*
                 * This particular version of the IPI hypercall can
                 * only target up to 64 CPUs.
                 */
                if (vcpu >= 64)
-                       goto ipi_mask_done;
+                       goto do_ex_hypercall;
 
-               __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+               __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
        }
 
-       ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
-
-ipi_mask_done:
-       local_irq_restore(flags);
+       ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+                                    ipi_arg.cpu_mask);
        return ((ret == 0) ? true : false);
+
+do_ex_hypercall:
+       return __send_ipi_mask_ex(mask, vector);
 }
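The rewritten function above reduces to: try the cheap 64-bit-mask hypercall, and bail to the EX variant the moment any VP number cannot be encoded. A standalone sketch of that decision, with an identity cpu-to-VP mapping assumed and printfs in place of the real hypercalls:

#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_vp(int cpu) { return (uint32_t)cpu; } /* assumed 1:1 */

static void send_ipi(const int *cpus, int n, int vector)
{
        uint64_t cpu_mask = 0;
        int i;

        /* cheapest check first: the highest CPU usually has the highest VP */
        if (cpu_to_vp(cpus[n - 1]) >= 64)
                goto do_ex;

        for (i = 0; i < n; i++) {
                uint32_t vp = cpu_to_vp(cpus[i]);

                if (vp >= 64)           /* mapping need not be monotonic */
                        goto do_ex;
                cpu_mask |= 1ULL << vp;
        }
        printf("HVCALL_SEND_IPI vec=%d mask=%#llx\n",
               vector, (unsigned long long)cpu_mask);
        return;
do_ex:
        printf("HVCALL_SEND_IPI_EX vec=%d (sparse VP set)\n", vector);
}

int main(void)
{
        int small[] = { 0, 3, 7 }, big[] = { 0, 3, 200 };

        send_ipi(small, 3, 0xf3);       /* fast path: single 64-bit mask */
        send_ipi(big, 3, 0xf3);         /* EX path: VP 200 does not fit */
        return 0;
}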
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -228,10 +241,7 @@ static void hv_send_ipi_self(int vector)
 void __init hv_apic_init(void)
 {
        if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
-               if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-                       pr_info("Hyper-V: Using ext hypercalls for IPI\n");
-               else
-                       pr_info("Hyper-V: Using IPI hypercalls\n");
+               pr_info("Hyper-V: Using IPI hypercalls\n");
                /*
                 * Set the IPI entry points.
                 */
index 4c431e1c1effc42ade651f9a69c3e06136827e17..1ff420217298edf88648d35b56cefb73b8beb788 100644 (file)
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
 {
        u64 guest_id, required_msrs;
        union hv_x64_msr_hypercall_contents hypercall_msr;
-       int cpuhp;
+       int cpuhp, i;
 
        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
        if (!hv_vp_index)
                return;
 
+       for (i = 0; i < num_possible_cpus(); i++)
+               hv_vp_index[i] = VP_INVAL;
+
        hv_vp_assist_page = kcalloc(num_possible_cpus(),
                                    sizeof(*hv_vp_assist_page), GFP_KERNEL);
        if (!hv_vp_assist_page) {
index de27615c51ea3fe951c2d2065734b5a79693c00a..1147e1fed7fff45e28e9c18e59e81542d1647d7f 100644 (file)
@@ -16,6 +16,8 @@
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+                                     const struct flush_tlb_info *info);
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -93,10 +95,29 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
        if (cpumask_equal(cpus, cpu_present_mask)) {
                flush->flags |= HV_FLUSH_ALL_PROCESSORS;
        } else {
+               /*
+                * From the supplied CPU set we need to figure out if we can get
+                * away with the cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
+                * hypercalls. This is possible when the highest VP number in
+                * the set is < 64. As VP numbers are usually in ascending order
+                * and match Linux CPU ids, here is an optimization: we check
+                * the VP number for the highest bit in the supplied set first
+                * so we can quickly find out if using the *_EX hypercalls is
+                * a must. We will also check all VP numbers when walking the
+                * supplied CPU set to remain correct in all cases.
+                */
+               if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+                       goto do_ex_hypercall;
+
                for_each_cpu(cpu, cpus) {
                        vcpu = hv_cpu_number_to_vp_number(cpu);
-                       if (vcpu >= 64)
+                       if (vcpu == VP_INVAL) {
+                               local_irq_restore(flags);
                                goto do_native;
+                       }
+
+                       if (vcpu >= 64)
+                               goto do_ex_hypercall;
 
                        __set_bit(vcpu, (unsigned long *)
                                  &flush->processor_mask);
@@ -123,7 +144,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
                                             gva_n, 0, flush, NULL);
        }
+       goto check_status;
+
+do_ex_hypercall:
+       status = hyperv_flush_tlb_others_ex(cpus, info);
 
+check_status:
        local_irq_restore(flags);
 
        if (!(status & HV_HYPERCALL_RESULT_MASK))
@@ -132,35 +158,22 @@ do_native:
        native_flush_tlb_others(cpus, info);
 }
 
-static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
-                                      const struct flush_tlb_info *info)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+                                     const struct flush_tlb_info *info)
 {
        int nr_bank = 0, max_gvas, gva_n;
        struct hv_tlb_flush_ex **flush_pcpu;
        struct hv_tlb_flush_ex *flush;
-       u64 status = U64_MAX;
-       unsigned long flags;
+       u64 status;
 
-       trace_hyperv_mmu_flush_tlb_others(cpus, info);
-
-       if (!hv_hypercall_pg)
-               goto do_native;
-
-       if (cpumask_empty(cpus))
-               return;
-
-       local_irq_save(flags);
+       if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+               return U64_MAX;
 
        flush_pcpu = (struct hv_tlb_flush_ex **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);
 
        flush = *flush_pcpu;
 
-       if (unlikely(!flush)) {
-               local_irq_restore(flags);
-               goto do_native;
-       }
-
        if (info->mm) {
                /*
                 * AddressSpace argument must match the CR3 with PCID bits
@@ -176,15 +189,10 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
        flush->hv_vp_set.valid_bank_mask = 0;
 
-       if (!cpumask_equal(cpus, cpu_present_mask)) {
-               flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-               nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
-       }
-
-       if (!nr_bank) {
-               flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
-               flush->flags |= HV_FLUSH_ALL_PROCESSORS;
-       }
+       flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+       nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
+       if (nr_bank < 0)
+               return U64_MAX;
 
        /*
         * We can flush at most max_gvas with one hypercall. Flush the
@@ -213,12 +221,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                        gva_n, nr_bank, flush, NULL);
        }
 
-       local_irq_restore(flags);
-
-       if (!(status & HV_HYPERCALL_RESULT_MASK))
-               return;
-do_native:
-       native_flush_tlb_others(cpus, info);
+       return status;
 }
 
 void hyperv_setup_mmu_ops(void)
@@ -226,11 +229,6 @@ void hyperv_setup_mmu_ops(void)
        if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
                return;
 
-       if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
-               pr_info("Using hypercall for remote TLB flush\n");
-               pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
-       } else {
-               pr_info("Using ext hypercall for remote TLB flush\n");
-               pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
-       }
+       pr_info("Using hypercall for remote TLB flush\n");
+       pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
 }
index c356098b6fb92b8ff7d42b2fd813c2a8551d3db1..4d4015ddcf2633e9e8388216f9e9c8639e2eced8 100644 (file)
@@ -7,8 +7,6 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
-#include <asm/nospec-branch.h>
-
 #ifdef APM_ZERO_SEGS
 #      define APM_DO_ZERO_SEGS \
                "pushl %%ds\n\t" \
@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
-       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
                  "=S" (*esi)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
-       firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
-       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
                  "=S" (si)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
-       firmware_restrict_branch_speculation_end();
        return error;
 }
 
index 219faaec51dfa192f69d8893c8844219c0c89029..990770f9e76b5a52af6f85692883a8507af00af6 100644 (file)
 #define _ASM_SI                __ASM_REG(si)
 #define _ASM_DI                __ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1      _ASM_AX
+#define _ASM_ARG2      _ASM_DX
+#define _ASM_ARG3      _ASM_CX
+
+#define _ASM_ARG1L     eax
+#define _ASM_ARG2L     edx
+#define _ASM_ARG3L     ecx
+
+#define _ASM_ARG1W     ax
+#define _ASM_ARG2W     dx
+#define _ASM_ARG3W     cx
+
+#define _ASM_ARG1B     al
+#define _ASM_ARG2B     dl
+#define _ASM_ARG3B     cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1      _ASM_DI
+#define _ASM_ARG2      _ASM_SI
+#define _ASM_ARG3      _ASM_DX
+#define _ASM_ARG4      _ASM_CX
+#define _ASM_ARG5      r8
+#define _ASM_ARG6      r9
+
+#define _ASM_ARG1Q     rdi
+#define _ASM_ARG2Q     rsi
+#define _ASM_ARG3Q     rdx
+#define _ASM_ARG4Q     rcx
+#define _ASM_ARG5Q     r8
+#define _ASM_ARG6Q     r9
+
+#define _ASM_ARG1L     edi
+#define _ASM_ARG2L     esi
+#define _ASM_ARG3L     edx
+#define _ASM_ARG4L     ecx
+#define _ASM_ARG5L     r8d
+#define _ASM_ARG6L     r9d
+
+#define _ASM_ARG1W     di
+#define _ASM_ARG2W     si
+#define _ASM_ARG3W     dx
+#define _ASM_ARG4W     cx
+#define _ASM_ARG5W     r8w
+#define _ASM_ARG6W     r9w
+
+#define _ASM_ARG1B     dil
+#define _ASM_ARG2B     sil
+#define _ASM_ARG3B     dl
+#define _ASM_ARG4B     cl
+#define _ASM_ARG5B     r8b
+#define _ASM_ARG6B     r9b
+
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
index 0db6bec95489ebc703df2650e98c7f624cc7bff9..b143717b92b3447c480495e23be68470bc7bf463 100644 (file)
@@ -80,6 +80,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
@@ -91,6 +92,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
+#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
@@ -103,6 +105,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
+#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
@@ -117,6 +120,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
@@ -130,6 +134,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
@@ -144,6 +149,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
+#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
@@ -173,9 +179,6 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
        return arch_atomic_add_return(-i, v);
 }
 
-#define arch_atomic_inc_return(v)  (arch_atomic_add_return(1, v))
-#define arch_atomic_dec_return(v)  (arch_atomic_sub_return(1, v))
-
 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
        return xadd(&v->counter, i);
@@ -199,7 +202,7 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-       return xchg(&v->counter, new);
+       return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic_and(int i, atomic_t *v)
@@ -253,27 +256,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
        return val;
 }
 
-/**
- * __arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
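The `#define arch_atomic_inc arch_atomic_inc` lines scattered through this hunk look like no-ops, but they are the detection mechanism for the generic atomic fallbacks: the generic layer tests `#ifndef` on each op and only synthesizes an implementation when the architecture stayed silent. A self-contained toy showing the mechanism; the names are illustrative, not the kernel's.

#include <stdio.h>

/* "arch" side: provides the op and announces it via define-to-itself */
static inline void my_inc(int *v) { ++*v; }
#define my_inc my_inc

/* "generic" side: emit a fallback only if the arch said nothing */
#ifndef my_inc
static inline void my_inc(int *v)
{
        __atomic_fetch_add(v, 1, __ATOMIC_SEQ_CST);     /* generic fallback */
}
#endif

int main(void)
{
        int v = 0;

        my_inc(&v);             /* resolves to the "arch" version */
        printf("%d\n", v);      /* prints 1 */
        return 0;
}

This is why deleting ops such as arch_atomic64_dec_and_test in this patch is safe: with no marker defined, the generic layer regenerates them from the remaining primitives.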
index 92212bf0484fecdd3922fefd3ab16c71e17b476f..ef959f02d0702b8d16979f1dd3ad6c1f405363e9 100644 (file)
@@ -158,6 +158,7 @@ static inline long long arch_atomic64_inc_return(atomic64_t *v)
                             "S" (v) : "memory", "ecx");
        return a;
 }
+#define arch_atomic64_inc_return arch_atomic64_inc_return
 
 static inline long long arch_atomic64_dec_return(atomic64_t *v)
 {
@@ -166,6 +167,7 @@ static inline long long arch_atomic64_dec_return(atomic64_t *v)
                             "S" (v) : "memory", "ecx");
        return a;
 }
+#define arch_atomic64_dec_return arch_atomic64_dec_return
 
 /**
  * arch_atomic64_add - add integer to atomic64 variable
@@ -197,26 +199,13 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
        return i;
 }
 
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v)
-{
-       return arch_atomic64_sub_return(i, v) == 0;
-}
-
 /**
  * arch_atomic64_inc - increment atomic64 variable
  * @v: pointer to type atomic64_t
  *
  * Atomically increments @v by 1.
  */
+#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
        __alternative_atomic64(inc, inc_return, /* no output */,
@@ -229,52 +218,13 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
+#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
 
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int arch_atomic64_dec_and_test(atomic64_t *v)
-{
-       return arch_atomic64_dec_return(v) == 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_inc_and_test(atomic64_t *v)
-{
-       return arch_atomic64_inc_return(v) == 0;
-}
-
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int arch_atomic64_add_negative(long long i, atomic64_t *v)
-{
-       return arch_atomic64_add_return(i, v) < 0;
-}
-
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
@@ -295,7 +245,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
@@ -304,6 +254,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
        return r;
 }
 
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        long long r;
index 6106b59d326066f869cc105939c7fb18dfa51310..4343d9b4f30e32056d5f6d5d1d6a273b556a3bba 100644 (file)
@@ -71,6 +71,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
@@ -82,6 +83,7 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
+#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
@@ -95,6 +97,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
+#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
@@ -110,6 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
@@ -123,6 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
@@ -137,6 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
@@ -169,9 +175,6 @@ static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
        return xadd(&v->counter, -i);
 }
 
-#define arch_atomic64_inc_return(v)  (arch_atomic64_add_return(1, (v)))
-#define arch_atomic64_dec_return(v)  (arch_atomic64_sub_return(1, (v)))
-
 static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
        return arch_cmpxchg(&v->counter, old, new);
@@ -185,46 +188,7 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
 
 static inline long arch_atomic64_xchg(atomic64_t *v, long new)
 {
-       return xchg(&v->counter, new);
-}
-
-/**
- * arch_atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       s64 c = arch_atomic64_read(v);
-       do {
-               if (unlikely(c == u))
-                       return false;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-       return true;
-}
-
-#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
-
-/*
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
-{
-       s64 dec, c = arch_atomic64_read(v);
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-       return dec;
+       return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic64_and(long i, atomic64_t *v)
index 042b5e892ed1063769b253bdf35e31171eb55c4d..14de0432d288414bd1437e44b8cb13facc6f12e9 100644 (file)
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 {
        unsigned long mask;
 
-       asm ("cmp %1,%2; sbb %0,%0;"
+       asm volatile ("cmp %1,%2; sbb %0,%0;"
                        :"=r" (mask)
                        :"g"(size),"r" (index)
                        :"cc");
index e3efd8a0606610d379b77fc413ec7c8d4f6953fd..a55d79b233d334df17941f60ba78c6b8f5acd04d 100644 (file)
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define xchg(ptr, v)   __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v)      __xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
index bfca3b346c746eaa04f0fa9e892524bd99e35177..072e5459fe2fe33cbcd450fbdc8e3030f075addf 100644 (file)
@@ -10,13 +10,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 #define arch_cmpxchg64(ptr, o, n)                                      \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg((ptr), (o), (n));                                       \
+       arch_cmpxchg((ptr), (o), (n));                                  \
 })
 
 #define arch_cmpxchg64_local(ptr, o, n)                                        \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg_local((ptr), (o), (n));                                 \
+       arch_cmpxchg_local((ptr), (o), (n));                            \
 })
 
 #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
index 5701f5cecd3125fbce64ead21d89d02fc8fa25af..b5c60faf8429397f7f32decfb1569c59f487a9c6 100644 (file)
 #define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_IBRS_ENHANCED      ( 7*32+29) /* Enhanced IBRS */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
index f59c39835a5a93c8d12e48f335459268b229e4af..a1f0e90d0818054c2bfb886fff380cbf336c5929 100644 (file)
@@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type)
        return HBP_NUM;
 }
 
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index cf090e584202259e5480af9e3248aa03f6f79873..7ed08a7c3398dc087a580abe82dd31761a8478f0 100644 (file)
 #define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
 #define INTEL_FAM6_XEON_PHI_KNM                0x85 /* Knights Mill */
 
+/* Useful macros */
+#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data)       \
+{                                                              \
+       .vendor         = X86_VENDOR_INTEL,                     \
+       .family         = _family,                              \
+       .model          = _model,                               \
+       .feature        = X86_FEATURE_ANY,                      \
+       .driver_data    = (kernel_ulong_t)&_driver_data         \
+}
+
+#define INTEL_CPU_FAM6(_model, _driver_data)                   \
+       INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+
 #endif /* _ASM_X86_INTEL_FAMILY_H */
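The INTEL_CPU_FAM6() helper added above exists to shrink x86_cpu_id match tables in drivers. A standalone sketch of how such a table is built; the struct, the 0x5e Skylake-desktop model value, and the driver data are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* simplified stand-ins for the kernel's types and constants */
#define X86_VENDOR_INTEL 0
#define X86_FEATURE_ANY  0
typedef unsigned long kernel_ulong_t;

struct x86_cpu_id {
        int vendor, family, model, feature;
        kernel_ulong_t driver_data;
};

#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data)       \
{                                                              \
        .vendor         = X86_VENDOR_INTEL,                    \
        .family         = _family,                             \
        .model          = _model,                              \
        .feature        = X86_FEATURE_ANY,                     \
        .driver_data    = (kernel_ulong_t)&_driver_data        \
}

#define INTEL_CPU_FAM6(_model, _driver_data)                   \
        INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)

#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5e /* assumed value for the sketch */

static int skylake_info = 42;           /* hypothetical per-model data */

static const struct x86_cpu_id ids[] = {
        INTEL_CPU_FAM6(SKYLAKE_DESKTOP, skylake_info),
        { }                             /* sentinel */
};

int main(void)
{
        printf("family=%d model=%#x data=%d\n", ids[0].family,
               ids[0].model, *(int *)ids[0].driver_data);
        return 0;
}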
index fe04491130aef0095aa6481fb93fe013415ac6f3..52f815a80539e8514c3ce614e9726445edb0d2e3 100644 (file)
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
 
 extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
 
-/**
- * struct intel_mid_ops - Interface between intel-mid & sub archs
- * @arch_setup: arch_setup function to re-initialize platform
- *             structures (x86_init, x86_platform_init)
- *
- * This structure can be extended if any new interface is required
- * between intel-mid & its sub arch files.
- */
-struct intel_mid_ops {
-       void (*arch_setup)(void);
-};
-
-/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)                             \
-       [cpuid] = get_##cpuname##_ops
-
-/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a)                 (sizeof(a)/sizeof(void *))
-
-/*
- * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
- * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
- */
-#define INTEL_MID_OPS_INIT {                                                   \
-       DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL),        \
-       DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW),  \
-       DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER)         \
-};
-
 #ifdef CONFIG_X86_INTEL_MID
 
 static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
 
 extern enum intel_mid_timer_options intel_mid_timer_options;
 
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define FSB_FREQ_83SKU                 83200
-#define FSB_FREQ_100SKU                        99840
-#define FSB_FREQ_133SKU                        133000
-
-#define FSB_FREQ_167SKU                        167000
-#define FSB_FREQ_200SKU                        200000
-#define FSB_FREQ_267SKU                        267000
-#define FSB_FREQ_333SKU                        333000
-#define FSB_FREQ_400SKU                        400000
-
 /* Bus Select SoC Fuse value */
 #define BSEL_SOC_FUSE_MASK             0x7
 /* FSB 133MHz */
index 62a9f4966b4298ec2f3aa6c32d0881bd4296956c..ae26df1c27896d20d25ba18555d14625b905077a 100644 (file)
@@ -8,6 +8,7 @@
 
 /* The maximal number of PEBS events: */
 #define MAX_PEBS_EVENTS                8
+#define MAX_FIXED_PEBS_EVENTS  3
 
 /*
  * A debug store configuration.
@@ -23,7 +24,7 @@ struct debug_store {
        u64     pebs_index;
        u64     pebs_absolute_maximum;
        u64     pebs_interrupt_threshold;
-       u64     pebs_event_reset[MAX_PEBS_EVENTS];
+       u64     pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS];
 } __aligned(PAGE_SIZE);
 
 DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
index 89f08955fff733c688a5ce4f4a0b8d74050ee617..c14f2a74b2be7495f1ee00c92322a58cc43d10a6 100644 (file)
@@ -13,7 +13,9 @@
  * Interrupt control:
  */
 
-static inline unsigned long native_save_fl(void)
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
+extern inline unsigned long native_save_fl(void)
 {
        unsigned long flags;
 
index 367d99cff42673a68b2658d15414ba649bbd941e..c8cec1b39b88f8fbe221f9fdca67047cf8ec295a 100644 (file)
@@ -78,7 +78,7 @@ struct arch_specific_insn {
         * boostable = true: This instruction has been boosted: we have
         * added a relative jump after the instruction copy in insn,
         * so no single-step and fixup are needed (unless there's
-        * a post_handler or break_handler).
+        * a post_handler).
         */
        bool boostable;
        bool if_modifier;
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_old_flags;
        unsigned long kprobe_saved_flags;
-       unsigned long *jprobe_saved_sp;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
deleted file mode 100644 (file)
index 4618526..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KVM_GUEST_H
-#define _ASM_X86_KVM_GUEST_H
-
-int kvm_setup_vsyscall_timeinfo(void);
-
-#endif /* _ASM_X86_KVM_GUEST_H */
index 3aea2658323a5ea5c76f2524b7315eb08149d9e1..4c723632c036e4c53f320e5caf39dbf472a41486 100644 (file)
@@ -7,7 +7,6 @@
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
 
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
index bbc796eb0a3b91ee1d1686060ee4ef10ac44fff8..eeeb9289c764db96099caea715682411d79396c8 100644 (file)
@@ -71,12 +71,7 @@ struct ldt_struct {
 
 static inline void *ldt_slot_va(int slot)
 {
-#ifdef CONFIG_X86_64
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-#else
-       BUG();
-       return (void *)fix_to_virt(FIX_HOLE);
-#endif
 }
 
 /*
index 3cd14311edfad6d04c5ea382b2b6845f66a9d6f8..19886fef1dfc2c5c747ef4798048318c7214bd43 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 
+#define VP_INVAL       U32_MAX
+
 struct ms_hyperv_info {
        u32 features;
        u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
-
 /*
  * Generate the guest ID.
  */
@@ -193,6 +194,40 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
                return hv_status;
 }
 
+/* Fast hypercall with 16 bytes of input */
+static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
+{
+       u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+
+#ifdef CONFIG_X86_64
+       {
+               __asm__ __volatile__("mov %4, %%r8\n"
+                                    CALL_NOSPEC
+                                    : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+                                      "+c" (control), "+d" (input1)
+                                    : "r" (input2),
+                                      THUNK_TARGET(hv_hypercall_pg)
+                                    : "cc", "r8", "r9", "r10", "r11");
+       }
+#else
+       {
+               u32 input1_hi = upper_32_bits(input1);
+               u32 input1_lo = lower_32_bits(input1);
+               u32 input2_hi = upper_32_bits(input2);
+               u32 input2_lo = lower_32_bits(input2);
+
+               __asm__ __volatile__ (CALL_NOSPEC
+                                     : "=A"(hv_status),
+                                       "+c"(input1_lo), ASM_CALL_CONSTRAINT
+                                     : "A" (control), "b" (input1_hi),
+                                       "D"(input2_hi), "S"(input2_lo),
+                                       THUNK_TARGET(hv_hypercall_pg)
+                                     : "cc");
+       }
+#endif
+               return hv_status;
+}
+
 /*
 * Rep hypercalls. Callers of these functions are supposed to ensure that
  * rep_count and varhead_size comply with Hyper-V hypercall definition.
@@ -281,6 +316,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         */
        for_each_cpu(cpu, cpus) {
                vcpu = hv_cpu_number_to_vp_number(cpu);
+               if (vcpu == VP_INVAL)
+                       return -1;
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
                __set_bit(vcpu_offset, (unsigned long *)
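cpumask_to_vpset() packs VPs into the sparse-set format the EX hypercalls expect: one 64-bit bank per 64 VP numbers, plus a mask of which banks are present; the hunk above adds the VP_INVAL bail-out. A simplified standalone version, with fixed-size arrays standing in for the real struct hv_vpset layout:

#include <stdint.h>
#include <stdio.h>

#define VP_INVAL  UINT32_MAX
#define MAX_BANKS 4

static int to_vpset(uint64_t banks[MAX_BANKS], uint64_t *valid_bank_mask,
                    const uint32_t *vps, int n)
{
        int i, nr_bank = 0;

        for (i = 0; i < n; i++) {
                uint32_t vcpu = vps[i];

                if (vcpu == VP_INVAL)
                        return -1;      /* caller must fall back or fail */

                banks[vcpu / 64] |= 1ULL << (vcpu % 64);
                *valid_bank_mask |= 1ULL << (vcpu / 64);
                if ((int)(vcpu / 64) >= nr_bank)
                        nr_bank = vcpu / 64 + 1;
        }
        return nr_bank;
}

int main(void)
{
        uint64_t banks[MAX_BANKS] = { 0 }, valid = 0;
        uint32_t vps[] = { 3, 70, 130 };        /* spans banks 0, 1 and 2 */

        printf("nr_bank=%d valid_bank_mask=%#llx\n",
               to_vpset(banks, &valid, vps, 3),
               (unsigned long long)valid);      /* nr_bank=3 valid=0x7 */
        return 0;
}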
index f6f6c63da62f4806d12d6a128bd9f3bb440c93ae..fd2a8c1b88bc157106c4f9346b8e7bec29fe6ba3 100644 (file)
@@ -214,7 +214,7 @@ enum spectre_v2_mitigation {
        SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
        SPECTRE_V2_RETPOLINE_GENERIC,
        SPECTRE_V2_RETPOLINE_AMD,
-       SPECTRE_V2_IBRS,
+       SPECTRE_V2_IBRS_ENHANCED,
 };
 
 /* The Speculative Store Bypass disable variants */
index 9c9dc579bd7db172287f5fdea9628cabaeff56af..46f516dd80ce9fbe0f01406b406ea9d582f6bbcc 100644 (file)
@@ -88,6 +88,7 @@ struct orc_entry {
        unsigned        sp_reg:4;
        unsigned        bp_reg:4;
        unsigned        type:2;
+       unsigned        end:1;
 } __packed;
 
 /*
@@ -101,6 +102,7 @@ struct unwind_hint {
        s16             sp_offset;
        u8              sp_reg;
        u8              type;
+       u8              end;
 };
 #endif /* __ASSEMBLY__ */
 
index a06b07399d172c06b428936dc14e033ae9697298..e9202a0de8f0b6bf7430df41eff55af5aa0b75bd 100644 (file)
@@ -450,9 +450,10 @@ do {                                                                       \
        bool __ret;                                                     \
        typeof(pcp1) __o1 = (o1), __n1 = (n1);                          \
        typeof(pcp2) __o2 = (o2), __n2 = (n2);                          \
-       asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"       \
-                   : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
-                   :  "b" (__n1), "c" (__n2), "a" (__o1));             \
+       asm volatile("cmpxchg8b "__percpu_arg(1)                        \
+                    CC_SET(z)                                          \
+                    : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
+                    : "b" (__n1), "c" (__n2));                         \
        __ret;                                                          \
 })
 
index ada6410fd2ecf6fdde039a185ce570b20af53fca..fbd578daa66e97416058e961dd440774fa9ed586 100644 (file)
@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 
 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 {
+       if (!pgtable_l5_enabled())
+               return;
+
        BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
        free_page((unsigned long)p4d);
 }
index 685ffe8a0eaf84d5a3b0374cf7ef31cb3e51bb7c..c399ea5eea41f965a976a931cfec4a40a49e26f9 100644 (file)
@@ -19,6 +19,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
+#endif
        *pmdp = pmd;
 }
 
@@ -58,6 +61,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #ifdef CONFIG_SMP
 static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
+#endif
        return __pmd(xchg((pmdval_t *)xp, 0));
 }
 #else
@@ -67,6 +73,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #ifdef CONFIG_SMP
 static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
+#endif
        return __pud(xchg((pudval_t *)xp, 0));
 }
 #else
index f982ef808e7e4b58dc010b054a016e0083452992..6deb6cd236e3901c59a3b5ab7e1db0f4f6471548 100644 (file)
@@ -35,4 +35,7 @@ typedef union {
 
 #define PTRS_PER_PTE   1024
 
+/* This covers all VMSPLIT_* and VMSPLIT_*_OPT variants */
+#define PGD_KERNEL_START       (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
+
 #endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
index f24df59c40b2e6d7627b61a774401f9430d79f8c..f2ca3139ca22ed6c14a08ef0eb5aa3adb7c28e6b 100644 (file)
@@ -98,6 +98,9 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
+#endif
        set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
 }
 
@@ -229,6 +232,10 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
 {
        union split_pud res, *orig = (union split_pud *)pudp;
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
+#endif
+
        /* xchg acts as a barrier before setting of the high bits */
        res.pud_low = xchg(&orig->pud_low, 0);
        res.pud_high = orig->pud_high;
index 6a59a6d0cc508857299afa9b06b58a8d6cc2e5f5..858358a82b14f4624c9a4805be04b438ae57dda4 100644 (file)
@@ -21,9 +21,10 @@ typedef union {
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD      (pv_info.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD      ((!static_cpu_has(X86_FEATURE_PTI) &&   \
+                                (pv_info.shared_kernel_pmd)))
 #else
-#define SHARED_KERNEL_PMD      1
+#define SHARED_KERNEL_PMD      (!static_cpu_has(X86_FEATURE_PTI))
 #endif
 
 /*
@@ -45,5 +46,6 @@ typedef union {
 #define PTRS_PER_PTE   512
 
 #define MAX_POSSIBLE_PHYSMEM_BITS      36
+#define PGD_KERNEL_START       (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
 
 #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
index 99ecde23c3ec03e02a9aba157e4b31e3c6f53ed7..a1cb3339da8ddbb45896b2ffcadac88146ef430c 100644 (file)
@@ -30,11 +30,14 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
 void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
 void ptdump_walk_pgd_level_checkwx(void);
+void ptdump_walk_user_pgd_level_checkwx(void);
 
 #ifdef CONFIG_DEBUG_WX
-#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx()                ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx_user()   ptdump_walk_user_pgd_level_checkwx()
 #else
-#define debug_checkwx() do { } while (0)
+#define debug_checkwx()                do { } while (0)
+#define debug_checkwx_user()   do { } while (0)
 #endif
 
 /*
@@ -640,8 +643,31 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 
 pmd_t *populate_extra_pmd(unsigned long vaddr);
 pte_t *populate_extra_pte(unsigned long vaddr);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user page tables and returns the resulting PGD that must
+ * be set in the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return pgd;
+       return __pti_set_user_pgtbl(pgdp, pgd);
+}
+#else   /* CONFIG_PAGE_TABLE_ISOLATION */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+       return pgd;
+}
+#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
+
 #endif /* __ASSEMBLY__ */
 
+
 #ifdef CONFIG_X86_32
 # include <asm/pgtable_32.h>
 #else
@@ -898,7 +924,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 #define pgd_page(pgd)  pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
-static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;
@@ -1154,6 +1180,70 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
        }
 }
 #endif
+/*
+ * Page table pages are page-aligned.  The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+       unsigned long ptr = (unsigned long)__ptr;
+
+       return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
+}
+
+static inline int pgd_large(pgd_t pgd) { return 0; }
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
+ * the user one is in the last 4k.  To switch between them, you
+ * just need to flip the 12th bit in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+       unsigned long __ptr = (unsigned long)ptr;
+
+       __ptr |= BIT(bit);
+       return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+       unsigned long __ptr = (unsigned long)ptr;
+
+       __ptr &= ~BIT(bit);
+       return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+       return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+       return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+       return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+       return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
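The kernel/user PGD pairing above relies on nothing more than alignment: the two 4k tables live in one 8k-aligned, 8k-sized block, so bit 12 of the address selects the half. A standalone illustration, with posix_memalign standing in for the kernel's order-1 page allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        void *pgd;

        /* 8k-aligned, 8k-sized: kernel half first, user half second */
        if (posix_memalign(&pgd, 2 * PAGE_SIZE, 2 * PAGE_SIZE))
                return 1;

        void *kernel_pgd = pgd;
        void *user_pgd   = (void *)((uintptr_t)pgd | (1UL << PAGE_SHIFT));
        void *back       = (void *)((uintptr_t)user_pgd & ~(1UL << PAGE_SHIFT));

        /* flipping bit 12 moves between the copies and back again */
        printf("kernel=%p user=%p back=%p\n", kernel_pgd, user_pgd, back);
        free(pgd);
        return 0;
}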
 
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
index 88a056b01db48838d4b064f4e945fa5b283c4f85..b3ec519e39827e58eaeb8a567303e37a6bc2e919 100644 (file)
@@ -34,8 +34,6 @@ static inline void check_pgt_cache(void) { }
 void paging_init(void);
 void sync_initial_page_table(void);
 
-static inline int pgd_large(pgd_t pgd) { return 0; }
-
 /*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
index d9a001a4a872a55142ae8162d1ecf6f07af3babf..b0bc0fff5f1f9ed792c910cd92d7d93d3b38ccfb 100644 (file)
@@ -50,13 +50,18 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
        ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1))   \
         & PMD_MASK)
 
-#define PKMAP_BASE             \
+#define LDT_BASE_ADDR          \
        ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
+#define LDT_END_ADDR           (LDT_BASE_ADDR + PMD_SIZE)
+
+#define PKMAP_BASE             \
+       ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
+
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END   (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
+# define VMALLOC_END   (LDT_BASE_ADDR - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR  VMALLOC_START
index 0fdcd21dadbd6422bf40f5cbb2361c08c5fafc14..acb6970e7bcfa805234ab36931d8b1d6a8356122 100644 (file)
@@ -132,91 +132,7 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 #endif
 }
 
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-/*
- * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
- * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
- * the user one is in the last 4k.  To switch between them, you
- * just need to flip the 12th bit in their addresses.
- */
-#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
-
-/*
- * This generates better code than the inline assembly in
- * __set_bit().
- */
-static inline void *ptr_set_bit(void *ptr, int bit)
-{
-       unsigned long __ptr = (unsigned long)ptr;
-
-       __ptr |= BIT(bit);
-       return (void *)__ptr;
-}
-static inline void *ptr_clear_bit(void *ptr, int bit)
-{
-       unsigned long __ptr = (unsigned long)ptr;
-
-       __ptr &= ~BIT(bit);
-       return (void *)__ptr;
-}
-
-static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
-{
-       return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
-{
-       return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
-{
-       return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
-{
-       return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-#endif /* CONFIG_PAGE_TABLE_ISOLATION */
-
-/*
- * Page table pages are page-aligned.  The lower half of the top
- * level is used for userspace and the top half for the kernel.
- *
- * Returns true for parts of the PGD that map userspace and
- * false for the parts that map the kernel.
- */
-static inline bool pgdp_maps_userspace(void *__ptr)
-{
-       unsigned long ptr = (unsigned long)__ptr;
-
-       return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
-}
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
-
-/*
- * Take a PGD location (pgdp) and a pgd value that needs to be set there.
- * Populates the user and returns the resulting PGD that must be set in
- * the kernel copy of the page tables.
- */
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
-       if (!static_cpu_has(X86_FEATURE_PTI))
-               return pgd;
-       return __pti_set_user_pgd(pgdp, pgd);
-}
-#else
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
-       return pgd;
-}
-#endif
-
-static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
        pgd_t pgd;
 
@@ -226,18 +142,18 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        }
 
        pgd = native_make_pgd(native_p4d_val(p4d));
-       pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
+       pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
        *p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static __always_inline void native_p4d_clear(p4d_t *p4d)
+static inline void native_p4d_clear(p4d_t *p4d)
 {
        native_set_p4d(p4d, native_make_p4d(0));
 }
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pti_set_user_pgd(pgdp, pgd);
+       *pgdp = pti_set_user_pgtbl(pgdp, pgd);
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
@@ -255,7 +171,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
 /*
  * Level 4 access.
  */
-static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
index 054765ab2da2478a92b728ca691488ba24650d21..04edd2d58211a78e3261993bd8d0e088e3b4c4ef 100644 (file)
@@ -115,6 +115,7 @@ extern unsigned int ptrs_per_p4d;
 #define LDT_PGD_ENTRY_L5       -112UL
 #define LDT_PGD_ENTRY          (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
+#define LDT_END_ADDR           (LDT_BASE_ADDR + PGDIR_SIZE)
 
 #define __VMALLOC_BASE_L4      0xffffc90000000000UL
 #define __VMALLOC_BASE_L5      0xffa0000000000000UL
@@ -153,4 +154,6 @@ extern unsigned int ptrs_per_p4d;
 
 #define EARLY_DYNAMIC_PAGE_TABLES      64
 
+#define PGD_KERNEL_START       ((PAGE_SIZE / 2) / sizeof(pgd_t))
+
 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
index 99fff853c9444466b6bd63163da17d9676773647..b64acb08a62b94b5c944182ef4b90664ee7f6432 100644 (file)
@@ -50,6 +50,7 @@
 #define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_SOFTW1   (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
 #define _PAGE_SOFTW2   (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+#define _PAGE_SOFTW3   (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
 #define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL  (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
@@ -266,14 +267,37 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
 
 typedef struct { pgdval_t pgd; } pgd_t;
 
+#ifdef CONFIG_X86_PAE
+
+/*
+ * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
+ * use it here.
+ */
+
+#define PGD_PAE_PAGE_MASK      ((signed long)PAGE_MASK)
+#define PGD_PAE_PHYS_MASK      (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)
+
+/*
+ * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
+ * All other bits are Reserved MBZ
+ */
+#define PGD_ALLOWED_BITS       (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
+                                _PAGE_PWT | _PAGE_PCD | \
+                                _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)
+
+#else
+/* No need to mask any bits for !PAE */
+#define PGD_ALLOWED_BITS       (~0ULL)
+#endif
+
 static inline pgd_t native_make_pgd(pgdval_t val)
 {
-       return (pgd_t) { val };
+       return (pgd_t) { val & PGD_ALLOWED_BITS };
 }
 
 static inline pgdval_t native_pgd_val(pgd_t pgd)
 {
-       return pgd.pgd;
+       return pgd.pgd & PGD_ALLOWED_BITS;
 }
 
 static inline pgdval_t pgd_flags(pgd_t pgd)
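With these definitions, native_make_pgd() and native_pgd_val() guarantee that reserved must-be-zero bits never reach (or leave) a PAE top-level entry. A minimal sketch of the masking, assuming the usual x86 page-flag bit positions and a 52-bit __PHYSICAL_MASK_SHIFT:

#include <assert.h>
#include <stdint.h>

typedef uint64_t pgdval_t;
typedef struct { pgdval_t pgd; } pgd_t;

#define PAGE_MASK             (~0xfffUL)
#define __PHYSICAL_MASK_SHIFT 52
#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_PWT     (1ULL << 3)
#define _PAGE_PCD     (1ULL << 4)
#define _PAGE_SOFTW1  (1ULL << 9)
#define _PAGE_SOFTW2  (1ULL << 10)
#define _PAGE_SOFTW3  (1ULL << 11)

#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT) - 1) & PGD_PAE_PAGE_MASK)
#define PGD_ALLOWED_BITS  (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
                           _PAGE_PWT | _PAGE_PCD | \
                           _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

static pgd_t native_make_pgd(pgdval_t val)
{
        return (pgd_t) { val & PGD_ALLOWED_BITS };
}

int main(void)
{
        /* _PAGE_RW (bit 1) is reserved in a PAE PDPTE and gets stripped. */
        pgd_t pgd = native_make_pgd(0x1000 | _PAGE_PRESENT | (1ULL << 1));

        assert(pgd.pgd == (0x1000 | _PAGE_PRESENT));
        return 0;
}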
index 625a52a5594f53b3ddc889843b92d228cc39a8e8..02c2cbda4a74ed83bfccc653df9b526be69676da 100644 (file)
 #define CR3_PCID_MASK  0xFFFull
 #define CR3_NOFLUSH    BIT_ULL(63)
 
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_PCID_USER_BIT     11
-#endif
-
 #else
 /*
  * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
@@ -53,4 +49,8 @@
 #define CR3_NOFLUSH    0
 #endif
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_PCID_USER_BIT     11
+#endif
+
 #endif /* _ASM_X86_PROCESSOR_FLAGS_H */
index cfd29ee8c3da930eabc38a96d2aacbbbc1c965c3..59663c08c949321e7fe0f3cc12c58eb4642da5cf 100644 (file)
@@ -966,6 +966,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
 
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(void *begin, void *end);
 
 void default_idle(void);
 #ifdef CONFIG_XEN
index 38a17f1d5c9d674c86696cdd22f4a93b2116d4c0..5df09a0b80b801bcd6cc9adc780dec594d84ccad 100644 (file)
@@ -6,10 +6,9 @@
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 extern void pti_init(void);
 extern void pti_check_boottime_disable(void);
-extern void pti_clone_kernel_text(void);
+extern void pti_finalize(void);
 #else
 static inline void pti_check_boottime_disable(void) { }
-static inline void pti_clone_kernel_text(void) { }
 #endif
 
 #endif /* __ASSEMBLY__ */
index 9ef5ee03d2d79268dc627dd8371653a7e2ec6c33..159622ee067488ed9baae2ba5627298b6844df32 100644 (file)
@@ -43,7 +43,7 @@ asm    (".pushsection .text;"
        "push  %rdx;"
        "mov   $0x1,%eax;"
        "xor   %edx,%edx;"
-       "lock cmpxchg %dl,(%rdi);"
+       LOCK_PREFIX "cmpxchg %dl,(%rdi);"
        "cmp   $0x1,%al;"
        "jne   .slowpath;"
        "pop   %rdx;"
index 4cf11d88d3b35f48b89fc0413d6db70ec9877075..19b90521954c906248c19234bfd722116a0d0fec 100644 (file)
@@ -5,6 +5,7 @@
  * PaX/grsecurity.
  */
 #include <linux/refcount.h>
+#include <asm/bug.h>
 
 /*
  * This is the first portion of the refcount error handling, which lives in
index 5c019d23d06b1168da0ea965d7c35bebd4d02307..4a911a382adedfdcd332b8b4884db6ff06e10533 100644 (file)
@@ -7,6 +7,7 @@
 
 extern char __brk_base[], __brk_limit[];
 extern struct exception_table_entry __stop___ex_table[];
+extern char __end_rodata_aligned[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
index bd090367236cee9ff92103139082f42b3a45c993..34cffcef7375dfa15cb30832972aa3d71e86d678 100644 (file)
@@ -46,6 +46,7 @@ int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_np_noalias(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
index eb5f7999a8934c7af8b6c5b170b05e5772264783..36bd243843d6dc9b281a7986d71aaf2cf0041be8 100644 (file)
@@ -87,15 +87,25 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 #endif
 
 /* This is used when switching tasks or entering/exiting vm86 mode. */
-static inline void update_sp0(struct task_struct *task)
+static inline void update_task_stack(struct task_struct *task)
 {
-       /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
+       /* sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
-       load_sp0(task->thread.sp0);
+       if (static_cpu_has(X86_FEATURE_XENPV))
+               load_sp0(task->thread.sp0);
+       else
+               this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
 #else
+       /*
+        * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
+        * doesn't work on x86-32 because sp1 and
+        * cpu_current_top_of_stack have different values (because of
+        * the non-zero stack-padding on 32-bit).
+        */
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task_top_of_stack(task));
 #endif
+
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
index 2ecd34e2d46c9a34bacbc329f2744cd23c32bcf3..e85ff65c43c3efc85702841ab77bffe145778707 100644 (file)
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern int after_bootmem;
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
index 6690cd3fc8b13ddd4318294a09bae5e57c0073cf..511bf5fae8b82abb7ebbd7f4173f70afae9d1538 100644 (file)
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
-       /*
-        * If we have PCID, then switching to init_mm is reasonably
-        * fast.  If we don't have PCID, then switching to init_mm is
-        * quite slow, so we try to defer it in the hopes that we can
-        * avoid it entirely.  The latter approach runs the risk of
-        * receiving otherwise unnecessary IPIs.
-        *
-        * This choice is just a heuristic.  The tlb code can handle this
-        * function returning true or false regardless of whether we have
-        * PCID.
-        */
-       return !static_cpu_has(X86_FEATURE_PCID);
-}
-
 struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
@@ -554,4 +538,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
        native_flush_tlb_others(mask, info)
 #endif
 
+extern void tlb_flush_remove_tables(struct mm_struct *mm);
+extern void tlb_flush_remove_tables_local(void *arg);
+
+#define HAVE_TLB_FLUSH_REMOVE_TABLES
+
 #endif /* _ASM_X86_TLBFLUSH_H */
index 4253bca9998904e882803690e023d970d20dd5cd..9c0d4b588e3fce6ab4a98a0ed6120a76a0ed4c85 100644 (file)
@@ -28,6 +28,21 @@ TRACE_EVENT(hyperv_mmu_flush_tlb_others,
                      __entry->addr, __entry->end)
        );
 
+TRACE_EVENT(hyperv_send_ipi_mask,
+           TP_PROTO(const struct cpumask *cpus,
+                    int vector),
+           TP_ARGS(cpus, vector),
+           TP_STRUCT__entry(
+                   __field(unsigned int, ncpus)
+                   __field(int, vector)
+                   ),
+           TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+                          __entry->vector = vector;
+                   ),
+           TP_printk("ncpus %d vector %x",
+                     __entry->ncpus, __entry->vector)
+       );
+
 #endif /* CONFIG_HYPERV */
 
 #undef TRACE_INCLUDE_PATH
index 2701d221583a26071b16c1caf1d34b3de7c7b007..eb5bbfeccb66133bf6cb0408b0785178b3230d1c 100644 (file)
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
 extern struct system_counterval_t convert_art_to_tsc(u64 art);
 extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
 
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
 extern void mark_tsc_async_resets(char *reason);
-extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
 extern unsigned long native_calibrate_tsc(void);
 extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
 
index 62acb613114b2322088083f7a9ccc85495a5afa4..a9d637bc301d7dd0086b5126a5ebac8f042c62c9 100644 (file)
@@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
        unsigned long ret;
 
        __uaccess_begin();
-       ret = memcpy_mcsafe(to, from, len);
+       /*
+        * Note, __memcpy_mcsafe() is explicitly used since it can
+        * handle exceptions / faults.  memcpy_mcsafe() may fall back to
+        * memcpy() which lacks this handling.
+        */
+       ret = __memcpy_mcsafe(to, from, len);
        __uaccess_end();
        return ret;
 }
index bae46fc6b9de3e97794f0576cae455ba15150cd7..0bcdb127936178cf1d53d0b9bfcf616f585dfc43 100644 (file)
@@ -26,7 +26,7 @@
  * the debuginfo as necessary.  It will also warn if it sees any
  * inconsistencies.
  */
-.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0
 #ifdef CONFIG_STACK_VALIDATION
 .Lunwind_hint_ip_\@:
        .pushsection .discard.unwind_hints
                .short \sp_offset
                .byte \sp_reg
                .byte \type
+               .byte \end
+               .balign 4
        .popsection
 #endif
 .endm
 
 .macro UNWIND_HINT_EMPTY
-       UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+       UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1
 .endm
 
 .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
 
 #else /* !__ASSEMBLY__ */
 
-#define UNWIND_HINT(sp_reg, sp_offset, type)                   \
+#define UNWIND_HINT(sp_reg, sp_offset, type, end)              \
        "987: \n\t"                                             \
        ".pushsection .discard.unwind_hints\n\t"                \
        /* struct unwind_hint */                                \
        ".long 987b - .\n\t"                                    \
-       ".short " __stringify(sp_offset) "\n\t"         \
+       ".short " __stringify(sp_offset) "\n\t"                 \
        ".byte " __stringify(sp_reg) "\n\t"                     \
        ".byte " __stringify(type) "\n\t"                       \
+       ".byte " __stringify(end) "\n\t"                        \
+       ".balign 4 \n\t"                                        \
        ".popsection\n\t"
 
-#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE, 0)
 
-#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE, 0)
 
 #endif /* __ASSEMBLY__ */
 
index 425e6b8b95478248dd3a32122b1aca408691cadf..6aa8499e1f62042a510aaafb2a1ef1e3a89804bd 100644 (file)
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
 #define VMX_MISC_ACTIVITY_HLT                  0x00000040
+#define VMX_MISC_ZERO_LEN_INS                  0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR             (2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION       (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION    (5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION       (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI           0x00000001
index 02d6f5cf4e70800188994e7e64f52916a9d7d83e..8824d01c0c352d6dbd2c12e228bd0de9ca335166 100644 (file)
@@ -61,6 +61,7 @@ obj-y                 += alternative.o i8253.o hw_breakpoint.o
 obj-y                  += tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y                  += pci-iommu_table.o
 obj-y                  += resource.o
+obj-y                  += irqflags.o
 
 obj-y                          += process.o
 obj-y                          += fpu/
index a481763a37763ada0586d2486f3a062066a3712d..014f214da5815d62f8f094f5e69f670c440ef69b 100644 (file)
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
        local_irq_save(flags);
        memcpy(addr, opcode, len);
        local_irq_restore(flags);
+       sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
        struct page *pages[2];
        int i;
 
+       /*
+        * While the boot memory allocator is running we cannot use struct
+        * pages as they are not yet initialized.
+        */
+       BUG_ON(!after_bootmem);
+
        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
index 2aabd4cb0e3f1d8ccaca0890991144c3f9d10519..07fa222f0c524068162eec219f7032c0855f9e49 100644 (file)
@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
        case 0x04: return 0x02000014;
        }
 
+       if (boot_cpu_data.x86_stepping > 4)
+               return 0;
+
        return ~0U;
 }
 
@@ -937,7 +940,7 @@ static int __init calibrate_APIC_clock(void)
 
        if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
                pr_warning("APIC timer disabled due to verification failure\n");
-                       return -1;
+               return -1;
        }
 
        return 0;
index 35aaee4fc0287adde2e59186fc7bd093a3015cdd..0954315842c09cf627dd448d858c822a529a455a 100644 (file)
@@ -218,7 +218,8 @@ static int reserve_irq_vector(struct irq_data *irqd)
        return 0;
 }
 
-static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+static int
+assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
 {
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool resvd = apicd->has_reserved;
@@ -245,22 +246,12 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
                return -EBUSY;
 
        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
-       if (vector > 0)
-               apic_update_vector(irqd, vector, cpu);
        trace_vector_alloc(irqd->irq, vector, resvd, vector);
-       return vector;
-}
-
-static int assign_vector_locked(struct irq_data *irqd,
-                               const struct cpumask *dest)
-{
-       struct apic_chip_data *apicd = apic_chip_data(irqd);
-       int vector = allocate_vector(irqd, dest);
-
        if (vector < 0)
                return vector;
+       apic_update_vector(irqd, vector, cpu);
+       apic_update_irq_cfg(irqd, vector, cpu);
 
-       apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
        return 0;
 }
 
@@ -433,7 +424,7 @@ static int activate_managed(struct irq_data *irqd)
                pr_err("Managed startup irq %u, no vector available\n",
                       irqd->irq);
        }
-       return ret;
+       return ret;
 }
 
 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
index efaf2d4f9c3c7983221298c2ecb37ce367345b1e..391f358ebb4c6d823f0640e7c92d1d762538cf40 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);
 
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size __initdata = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int __init parse_mem_block_size(char *ptr)
+{
+       unsigned long size = memparse(ptr, NULL);
+
+       /* Size will be rounded down by set_block_size() below */
+       mem_block_size = size;
+       return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+       unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+       unsigned long size;
+
+       for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+               if (IS_ALIGNED(base, size))
+                       break;
+
+       if (size >= mem_block_size)
+               return 0;
+
+       mem_block_size = size;
+       return 1;
+}
+
+static __init void set_block_size(void)
+{
+       unsigned int order = ffs(mem_block_size);
+
+       if (order) {
+               /* adjust for ffs return of 1..64 */
+               set_memory_block_size_order(order - 1);
+               pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+       } else {
+               /* bad or zero value, default to 1UL << 31 (2GB) */
+               pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+               set_memory_block_size_order(31);
+       }
+}
+
 /* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
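adj_blksize() halves the block size until the range start is aligned, and set_block_size() turns the final size into an order for set_memory_block_size_order(); ffs() returns 1-based bit positions, hence the "- 1". A standalone sketch of that logic, assuming a 128 MiB MIN_MEMORY_BLOCK_SIZE (the uv_memblksize= early parameter above feeds the same path through memparse()):

#include <assert.h>
#include <strings.h>            /* ffs() */

#define MIN_MEMORY_BLOCK_SIZE (1UL << 27)       /* assumed 128 MiB sections */

static unsigned long mem_block_size = 2UL << 30;        /* default 2GB */

static int adj_blksize(unsigned long base)
{
        unsigned long size;

        for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
                if (!(base & (size - 1)))       /* IS_ALIGNED(base, size) */
                        break;

        if (size >= mem_block_size)
                return 0;

        mem_block_size = size;
        return 1;
}

int main(void)
{
        /* A range starting at 512 MiB forces the block size down to 512 MiB. */
        assert(adj_blksize(512UL << 20) == 1);
        assert(mem_block_size == 512UL << 20);

        /* ffs() is 1-based: 512 MiB is bit 29, so the order is ffs() - 1. */
        assert(ffs(mem_block_size) - 1 == 29);
        return 0;
}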
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
                                        << UV_GAM_RANGE_SHFT);
                int order = 0;
                char suffix[] = " KMGTPE";
+               int flag = ' ';
 
                while (size > 9999 && order < sizeof(suffix)) {
                        size /= 1024;
                        order++;
                }
 
+               /* adjust max block size to current range start */
+               if (gre->type == 1 || gre->type == 2)
+                       if (adj_blksize(lgre))
+                               flag = '*';
+
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+                       pr_info("UV:  # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-                       size, suffix[order],
+                       flag, size, suffix[order],
                        gre->type, gre->nasid, gre->sockid, gre->pnode);
 
+               /* update to next range start */
                lgre = gre->limit;
                if (sock_min > gre->sockid)
                        sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
 
        build_socket_tables();
        build_uv_gr_table();
+       set_block_size();
        uv_init_hub_info(&hub_info);
        uv_possible_blades = num_possible_nodes();
        if (!_node_to_pnode)
index 5d0de79fdab06cbffc55dfd80d094ac0f07742e2..ec00d1ff5098b3701b1f1af324be2576676b7786 100644 (file)
 #include <asm/olpc.h>
 #include <asm/paravirt.h>
 #include <asm/reboot.h>
+#include <asm/nospec-branch.h>
 
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
 extern int (*console_blank_hook)(int);
@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
        gdt[0x40 / 8] = bad_bios_desc;
 
        apm_irq_save(flags);
+       firmware_restrict_branch_speculation_start();
        APM_DO_SAVE_SEGS;
        apm_bios_call_asm(call->func, call->ebx, call->ecx,
                          &call->eax, &call->ebx, &call->ecx, &call->edx,
                          &call->esi);
        APM_DO_RESTORE_SEGS;
+       firmware_restrict_branch_speculation_end();
        apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
        gdt[0x40 / 8] = bad_bios_desc;
 
        apm_irq_save(flags);
+       firmware_restrict_branch_speculation_start();
        APM_DO_SAVE_SEGS;
        error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
                                         &call->eax);
        APM_DO_RESTORE_SEGS;
+       firmware_restrict_branch_speculation_end();
        apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
index dcb008c320fe05c2f236993815f18f20ab6e9d5d..01de31db300d3cbf17de120b3a89011f170f65d3 100644 (file)
@@ -103,4 +103,9 @@ void common(void) {
        OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
        OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
        DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
+       DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
+
+       /* Offset for sp0 and sp1 into the tss_struct */
+       OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 }
index a4a3be399f4b27ab5adf1df10c0a8ee4b2738e13..82826f2275cce92ccb5941c98717962c38e8b61c 100644 (file)
@@ -46,8 +46,14 @@ void foo(void)
        OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
        BLANK();
 
-       /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+       /*
+        * Offset from the entry stack to task stack stored in TSS. Kernel entry
+        * happens on the per-cpu entry-stack, and the asm code switches to the
+        * task-stack pointer stored in x86_tss.sp1, which is a copy of
+        * task->thread.sp0 where entry code can find it.
+        */
+       DEFINE(TSS_entry2task_stack,
+              offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
               offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
 #ifdef CONFIG_STACKPROTECTOR
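TSS_entry2task_stack is just the distance from the byte past the per-cpu entry stack to the x86_tss.sp1 slot, computed entirely from struct layout. A minimal sketch with stand-in types (the real cpu_entry_area is far larger; only the offsetof()/offsetofend() mechanics are shown):

#include <assert.h>
#include <stddef.h>

/* Kernel helper: offset of the first byte past MEMBER. */
#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct entry_stack { unsigned long words[64]; };        /* stand-in size */
struct fake_tss { unsigned long sp0, sp1; };

struct fake_cpu_entry_area {
        struct entry_stack stack;
        struct fake_tss tss;
};

int main(void)
{
        /* Distance from the end of the entry stack to tss.sp1. */
        long off = offsetof(struct fake_cpu_entry_area, tss.sp1)
                 - offsetofend(struct fake_cpu_entry_area, stack);

        assert(off == (long)sizeof(unsigned long));     /* sp0 sits in between */
        return 0;
}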
index b2dcd161f5149492e8a24f06d7074d527308528c..3b9405e7ba2b5e1da1f35ac978986a342c4c6eff 100644 (file)
@@ -65,8 +65,6 @@ int main(void)
 #undef ENTRY
 
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
-       OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
-       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        BLANK();
 
 #ifdef CONFIG_STACKPROTECTOR
index 7a40196967cb308876e4eafce7cda932a1690066..347137e80bf5ace65a2688a78897f6f704e96800 100644 (file)
@@ -35,7 +35,9 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)         += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)     += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)           += umc.o
 
-obj-$(CONFIG_INTEL_RDT)        += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o intel_rdt_ctrlmondata.o
+obj-$(CONFIG_INTEL_RDT)        += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
+obj-$(CONFIG_INTEL_RDT)        += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
+CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
 
 obj-$(CONFIG_X86_MCE)                  += mcheck/
 obj-$(CONFIG_MTRR)                     += mtrr/
index 082d7875cef82eb779b68e5105330482f5a419d8..b732438c1a1ef567614277519597321eb7dd637b 100644 (file)
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
                }
        }
 
-       set_cpu_cap(c, X86_FEATURE_K7);
-
        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;
@@ -543,7 +541,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
-       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+       if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+           !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+           c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;
 
                switch (c->x86) {
@@ -615,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
        early_init_amd_mc(c);
 
+#ifdef CONFIG_X86_32
+       if (c->x86 == 6)
+               set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+       if (c->x86 >= 0xf)
+               set_cpu_cap(c, X86_FEATURE_K8);
+
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 
        /*
@@ -861,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 
        init_amd_cacheinfo(c);
 
-       if (c->x86 >= 0xf)
-               set_cpu_cap(c, X86_FEATURE_K8);
-
        if (cpu_has(c, X86_FEATURE_XMM2)) {
                unsigned long long val;
                int ret;
index cd0fda1fff6d3800fbbbf59a19711eba2df0f96c..405a9a61bb895f33c02ca738010f8c140fbb9a0e 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/hypervisor.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
@@ -129,6 +130,7 @@ static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
+       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
 };
 
 #undef pr_fmt
@@ -154,7 +156,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
                /* SSBD controlled in MSR_SPEC_CTRL */
-               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
                if (hostval != guestval) {
@@ -311,23 +314,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        return cmd;
 }
 
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-           boot_cpu_data.x86 == 6) {
-               switch (boot_cpu_data.x86_model) {
-               case INTEL_FAM6_SKYLAKE_MOBILE:
-               case INTEL_FAM6_SKYLAKE_DESKTOP:
-               case INTEL_FAM6_SKYLAKE_X:
-               case INTEL_FAM6_KABYLAKE_MOBILE:
-               case INTEL_FAM6_KABYLAKE_DESKTOP:
-                       return true;
-               }
-       }
-       return false;
-}
-
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -347,6 +333,13 @@ static void __init spectre_v2_select_mitigation(void)
 
        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+                       mode = SPECTRE_V2_IBRS_ENHANCED;
+                       /* Force it so VMEXIT will restore correctly */
+                       x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                       goto specv2_set_mode;
+               }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
@@ -384,26 +377,20 @@ retpoline_auto:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }
 
+specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
 
        /*
-        * If neither SMEP nor PTI are available, there is a risk of
-        * hitting userspace addresses in the RSB after a context switch
-        * from a shallow call stack to a deeper one. To prevent this fill
-        * the entire RSB, even when using IBRS.
+        * If spectre v2 protection has been enabled, unconditionally fill
+        * RSB during a context switch; this protects against two independent
+        * issues:
         *
-        * Skylake era CPUs have a separate issue with *underflow* of the
-        * RSB, when they will predict 'ret' targets from the generic BTB.
-        * The proper mitigation for this is IBRS. If IBRS is not supported
-        * or deactivated in favour of retpolines the RSB fill on context
-        * switch is required.
+        *      - RSB underflow (and switch to BTB) on Skylake+
+        *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
-       if ((!boot_cpu_has(X86_FEATURE_PTI) &&
-            !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
-               setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-               pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
-       }
+       setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+       pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -413,9 +400,16 @@ retpoline_auto:
 
        /*
         * Retpoline means the kernel is safe because it has no indirect
-        * branches. But firmware isn't, so use IBRS to protect that.
+        * branches. Enhanced IBRS protects firmware too, so enable restricted
+        * speculation around firmware calls only when Enhanced IBRS isn't
+        * supported.
+        *
+        * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+        * the user might select retpoline on the kernel command line and, if
+        * the CPU supports Enhanced IBRS, the kernel might unintentionally not
+        * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS)) {
+       if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
@@ -532,9 +526,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
-               if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+                   !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
-               else {
+               } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -664,6 +659,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
 
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+                       return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
                break;
 
        case X86_BUG_SPECTRE_V1:
index 38354c66df81144b7d2998ee42fce7a6b15485cd..0c5fcbd998cf11badefad906a2122400a3512d58 100644 (file)
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
                        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
 
                if (num_sharing_cache) {
-                       int bits = get_count_order(num_sharing_cache) - 1;
+                       int bits = get_count_order(num_sharing_cache);
 
                        per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
                }
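The llc_id fix drops a stray "- 1": get_count_order() already returns the number of low APIC-ID bits covered by the sharing count, which is exactly how far the APIC ID must be shifted to get the LLC ID. A quick sketch with a user-space stand-in for get_count_order():

#include <assert.h>

/* smallest k with (1 << k) >= n, i.e. ceil(log2(n)) */
static int get_count_order(unsigned int n)
{
        int k = 0;

        while ((1U << k) < n)
                k++;
        return k;
}

int main(void)
{
        unsigned int num_sharing_cache = 4;     /* 4 threads share the LLC */
        unsigned int apicid = 0x17;
        int bits = get_count_order(num_sharing_cache);

        assert(bits == 2);
        assert((apicid >> bits) == 0x5);
        /* The old "- 1" shifted by one bit too few and split each LLC in two. */
        return 0;
}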
index 0df7151cfef42cb908c9d76f0b4e78db1620f615..ba6b8bb1c036575933ed13ea77b0ffc909532d92 100644 (file)
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -1002,6 +1005,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
+       if (ia32_cap & ARCH_CAP_IBRS_ALL)
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
        if (x86_match_cpu(cpu_no_meltdown))
                return;
 
@@ -1012,6 +1018,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+       setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+       setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -1086,6 +1110,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         */
        if (!pgtable_l5_enabled())
                setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+       detect_nopl();
 }
 
 void __init early_cpu_init(void)
@@ -1121,24 +1147,6 @@ void __init early_cpu_init(void)
        early_identify_cpu(&boot_cpu_data);
 }
 
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
-       clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
-       set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
@@ -1201,8 +1209,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
        get_model_name(c); /* Default name */
 
-       detect_nopl(c);
-
        detect_null_seg_behavior(c);
 
        /*
@@ -1801,11 +1807,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, curr);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
        set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
 
index eb75564f2d257bfee92b36f311a1ae37133f2721..c050cd6066af0068ce6e08a5f1c8dbd7287addaf 100644 (file)
@@ -465,14 +465,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC   0x00000001
 #define X86_VMX_FEATURE_PROC_CTLS2_EPT         0x00000002
 #define X86_VMX_FEATURE_PROC_CTLS2_VPID                0x00000020
+#define x86_VMX_FEATURE_EPT_CAP_AD             0x00200000
 
        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+       u32 msr_vpid_cap, msr_ept_cap;
 
        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);
+       clear_cpu_cap(c, X86_FEATURE_EPT_AD);
 
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +490,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
-               if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+               if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
                        set_cpu_cap(c, X86_FEATURE_EPT);
+                       rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+                             msr_ept_cap, msr_vpid_cap);
+                       if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
+                               set_cpu_cap(c, X86_FEATURE_EPT_AD);
+               }
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
index ec4754f81cbdcdf4971cafe73698500360da3934..abb71ac704433cea9f2eca68b8f3ed60566c22da 100644 (file)
@@ -859,6 +859,8 @@ static __init bool get_rdt_resources(void)
        return (rdt_mon_capable || rdt_alloc_capable);
 }
 
+static enum cpuhp_state rdt_online;
+
 static int __init intel_rdt_late_init(void)
 {
        struct rdt_resource *r;
@@ -880,6 +882,7 @@ static int __init intel_rdt_late_init(void)
                cpuhp_remove_state(state);
                return ret;
        }
+       rdt_online = state;
 
        for_each_alloc_capable_rdt_resource(r)
                pr_info("Intel RDT %s allocation detected\n", r->name);
@@ -891,3 +894,11 @@ static int __init intel_rdt_late_init(void)
 }
 
 late_initcall(intel_rdt_late_init);
+
+static void __exit intel_rdt_exit(void)
+{
+       cpuhp_remove_state(rdt_online);
+       rdtgroup_exit();
+}
+
+__exitcall(intel_rdt_exit);
index 39752825e3760cbcff2c09efc0963e621076653a..4e588f36228f8731b08dffc501288700f481cafe 100644 (file)
@@ -80,6 +80,34 @@ enum rdt_group_type {
        RDT_NUM_GROUP,
 };
 
+/**
+ * enum rdtgrp_mode - Mode of a RDT resource group
+ * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
+ * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
+ * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
+ * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
+ *                          allowed AND the allocations are Cache Pseudo-Locked
+ *
+ * The mode of a resource group enables control over the allowed overlap
+ * between allocations associated with different resource groups (classes
+ * of service). The user can modify the mode of a resource group by
+ * writing to the "mode" resctrl file associated with the resource group.
+ *
+ * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
+ * writing the appropriate text to the "mode" file. A resource group enters
+ * "pseudo-locked" mode after the schemata is written while the resource
+ * group is in "pseudo-locksetup" mode.
+ */
+enum rdtgrp_mode {
+       RDT_MODE_SHAREABLE = 0,
+       RDT_MODE_EXCLUSIVE,
+       RDT_MODE_PSEUDO_LOCKSETUP,
+       RDT_MODE_PSEUDO_LOCKED,
+
+       /* Must be last */
+       RDT_NUM_MODES,
+};
+
 /**
  * struct mongroup - store mon group's data in resctrl fs.
  * @mon_data_kn                kernlfs node for the mon_data directory
@@ -94,6 +122,43 @@ struct mongroup {
        u32                     rmid;
 };
 
+/**
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @r:                 RDT resource to which this pseudo-locked region
+ *                     belongs
+ * @d:                 RDT domain to which this pseudo-locked region
+ *                     belongs
+ * @cbm:               bitmask of the pseudo-locked region
+ * @lock_thread_wq:    waitqueue used to wait on the pseudo-locking thread
+ *                     completion
+ * @thread_done:       variable used by waitqueue to test if pseudo-locking
+ *                     thread completed
+ * @cpu:               core associated with the cache on which the setup code
+ *                     will be run
+ * @line_size:         size of the cache lines
+ * @size:              size of pseudo-locked region in bytes
+ * @kmem:              the kernel memory associated with pseudo-locked region
+ * @minor:             minor number of character device associated with this
+ *                     region
+ * @debugfs_dir:       pointer to this region's directory in the debugfs
+ *                     filesystem
+ * @pm_reqs:           Power management QoS requests related to this region
+ */
+struct pseudo_lock_region {
+       struct rdt_resource     *r;
+       struct rdt_domain       *d;
+       u32                     cbm;
+       wait_queue_head_t       lock_thread_wq;
+       int                     thread_done;
+       int                     cpu;
+       unsigned int            line_size;
+       unsigned int            size;
+       void                    *kmem;
+       unsigned int            minor;
+       struct dentry           *debugfs_dir;
+       struct list_head        pm_reqs;
+};
+
 /**
  * struct rdtgroup - store rdtgroup's data in resctrl file system.
  * @kn:                                kernfs node
@@ -106,16 +171,20 @@ struct mongroup {
  * @type:                      indicates type of this rdtgroup - either
  *                             monitor only or ctrl_mon group
  * @mon:                       mongroup related data
+ * @mode:                      mode of resource group
+ * @plr:                       pseudo-locked region
  */
 struct rdtgroup {
-       struct kernfs_node      *kn;
-       struct list_head        rdtgroup_list;
-       u32                     closid;
-       struct cpumask          cpu_mask;
-       int                     flags;
-       atomic_t                waitcount;
-       enum rdt_group_type     type;
-       struct mongroup         mon;
+       struct kernfs_node              *kn;
+       struct list_head                rdtgroup_list;
+       u32                             closid;
+       struct cpumask                  cpu_mask;
+       int                             flags;
+       atomic_t                        waitcount;
+       enum rdt_group_type             type;
+       struct mongroup                 mon;
+       enum rdtgrp_mode                mode;
+       struct pseudo_lock_region       *plr;
 };
 
 /* rdtgroup.flags */
@@ -148,6 +217,7 @@ extern struct list_head rdt_all_groups;
 extern int max_name_width, max_data_width;
 
 int __init rdtgroup_init(void);
+void __exit rdtgroup_exit(void);
 
 /**
  * struct rftype - describe each file in the resctrl file system
@@ -216,22 +286,24 @@ struct mbm_state {
  * @mbps_val:  When mba_sc is enabled, this holds the bandwidth in MBps
  * @new_ctrl:  new ctrl value to be loaded
  * @have_new_ctrl: did user provide new_ctrl for this domain
+ * @plr:       pseudo-locked region (if any) associated with domain
  */
 struct rdt_domain {
-       struct list_head        list;
-       int                     id;
-       struct cpumask          cpu_mask;
-       unsigned long           *rmid_busy_llc;
-       struct mbm_state        *mbm_total;
-       struct mbm_state        *mbm_local;
-       struct delayed_work     mbm_over;
-       struct delayed_work     cqm_limbo;
-       int                     mbm_work_cpu;
-       int                     cqm_work_cpu;
-       u32                     *ctrl_val;
-       u32                     *mbps_val;
-       u32                     new_ctrl;
-       bool                    have_new_ctrl;
+       struct list_head                list;
+       int                             id;
+       struct cpumask                  cpu_mask;
+       unsigned long                   *rmid_busy_llc;
+       struct mbm_state                *mbm_total;
+       struct mbm_state                *mbm_local;
+       struct delayed_work             mbm_over;
+       struct delayed_work             cqm_limbo;
+       int                             mbm_work_cpu;
+       int                             cqm_work_cpu;
+       u32                             *ctrl_val;
+       u32                             *mbps_val;
+       u32                             new_ctrl;
+       bool                            have_new_ctrl;
+       struct pseudo_lock_region       *plr;
 };
 
 /**
@@ -351,7 +423,7 @@ struct rdt_resource {
        struct rdt_cache        cache;
        struct rdt_membw        membw;
        const char              *format_str;
-       int (*parse_ctrlval)    (char *buf, struct rdt_resource *r,
+       int (*parse_ctrlval)    (void *data, struct rdt_resource *r,
                                 struct rdt_domain *d);
        struct list_head        evt_list;
        int                     num_rmid;
@@ -359,8 +431,8 @@ struct rdt_resource {
        unsigned long           fflags;
 };
 
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(char *buf, struct rdt_resource *r,  struct rdt_domain *d);
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
+int parse_bw(void *_buf, struct rdt_resource *r,  struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -368,7 +440,7 @@ extern struct rdt_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
-int __init rdtgroup_init(void);
+extern struct dentry *debugfs_resctrl;
 
 enum {
        RDT_RESOURCE_L3,
@@ -439,13 +511,32 @@ void rdt_last_cmd_printf(const char *fmt, ...);
 void rdt_ctrl_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+                            umode_t mask);
 struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
                                   struct list_head **pos);
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+                          u32 _cbm, int closid, bool exclusive);
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
+                                 u32 cbm);
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
+int rdtgroup_tasks_assigned(struct rdtgroup *r);
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
+int rdt_pseudo_lock_init(void);
+void rdt_pseudo_lock_release(void);
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+int update_domains(struct rdt_resource *r, int closid);
+void closid_free(int closid);
 int alloc_rmid(void);
 void free_rmid(u32 rmid);
 int rdt_get_mon_l3_config(struct rdt_resource *r);
index 116d57b248d3cae479c47848766a730e620aa253..af358ca0516057c8fa7945272867afd737125654 100644 (file)
@@ -64,9 +64,10 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
 {
        unsigned long data;
+       char *buf = _buf;
 
        if (d->have_new_ctrl) {
                rdt_last_cmd_printf("duplicate domain %d\n", d->id);
@@ -87,7 +88,7 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
  *     are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
  * Additionally Haswell requires at least two bits set.
  */
-static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 {
        unsigned long first_bit, zero_bit, val;
        unsigned int cbm_len = r->cache.cbm_len;
@@ -122,22 +123,64 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
+struct rdt_cbm_parse_data {
+       struct rdtgroup         *rdtgrp;
+       char                    *buf;
+};
+
 /*
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
 {
-       unsigned long data;
+       struct rdt_cbm_parse_data *data = _data;
+       struct rdtgroup *rdtgrp = data->rdtgrp;
+       u32 cbm_val;
 
        if (d->have_new_ctrl) {
                rdt_last_cmd_printf("duplicate domain %d\n", d->id);
                return -EINVAL;
        }
 
-       if(!cbm_validate(buf, &data, r))
+       /*
+        * Cannot set up more than one pseudo-locked region in a cache
+        * hierarchy.
+        */
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+           rdtgroup_pseudo_locked_in_hierarchy(d)) {
+               rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
                return -EINVAL;
-       d->new_ctrl = data;
+       }
+
+       if (!cbm_validate(data->buf, &cbm_val, r))
+               return -EINVAL;
+
+       if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+            rdtgrp->mode == RDT_MODE_SHAREABLE) &&
+           rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
+               rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+               return -EINVAL;
+       }
+
+       /*
+        * The CBM may not overlap with the CBM of another closid if
+        * either is exclusive.
+        */
+       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+               rdt_last_cmd_printf("overlaps with exclusive group\n");
+               return -EINVAL;
+       }
+
+       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+               if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+                   rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                       rdt_last_cmd_printf("overlaps with other group\n");
+                       return -EINVAL;
+               }
+       }
+
+       d->new_ctrl = cbm_val;
        d->have_new_ctrl = true;
 
        return 0;
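The parse_ctrlval() signature change threads extra context (the owning rdtgroup as well as the text buffer) through a single opaque pointer, which is why parse_cbm() now unpacks a struct rdt_cbm_parse_data. A generic sketch of this pack-into-a-struct callback pattern, with purely hypothetical names:

#include <assert.h>
#include <stdio.h>

struct parse_data {
        const char *buf;        /* text to parse */
        int closid;             /* extra context the callback needs */
};

/* The callback keeps a generic void * signature... */
static int parse_value(void *_data)
{
        struct parse_data *data = _data;        /* ...and unpacks it here */
        unsigned int val;

        if (sscanf(data->buf, "%x", &val) != 1)
                return -1;
        printf("closid %d -> value %#x\n", data->closid, val);
        return 0;
}

/* Stands in for the per-domain loop in parse_line(). */
static int walk(int (*parse)(void *), void *arg)
{
        return parse(arg);
}

int main(void)
{
        struct parse_data data = { .buf = "3f", .closid = 2 };

        assert(walk(parse_value, &data) == 0);
        return 0;
}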
@@ -149,8 +192,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
  * separated by ";". The "id" is in decimal, and must match one of
  * the "id"s for this resource.
  */
-static int parse_line(char *line, struct rdt_resource *r)
+static int parse_line(char *line, struct rdt_resource *r,
+                     struct rdtgroup *rdtgrp)
 {
+       struct rdt_cbm_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
        unsigned long dom_id;
@@ -167,15 +212,32 @@ next:
        dom = strim(dom);
        list_for_each_entry(d, &r->domains, list) {
                if (d->id == dom_id) {
-                       if (r->parse_ctrlval(dom, r, d))
+                       data.buf = dom;
+                       data.rdtgrp = rdtgrp;
+                       if (r->parse_ctrlval(&data, r, d))
                                return -EINVAL;
+                       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                               /*
+                                * We are in pseudo-locking setup mode and just
+                                * parsed a valid CBM that should be
+                                * pseudo-locked. Only one locked region per
+                                * resource group and domain is allowed, so do
+                                * the required initialization for a single
+                                * region and return.
+                                */
+                               rdtgrp->plr->r = r;
+                               rdtgrp->plr->d = d;
+                               rdtgrp->plr->cbm = d->new_ctrl;
+                               d->plr = rdtgrp->plr;
+                               return 0;
+                       }
                        goto next;
                }
        }
        return -EINVAL;
 }
 
-static int update_domains(struct rdt_resource *r, int closid)
+int update_domains(struct rdt_resource *r, int closid)
 {
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
@@ -220,13 +282,14 @@ done:
        return 0;
 }
 
-static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
+static int rdtgroup_parse_resource(char *resname, char *tok,
+                                  struct rdtgroup *rdtgrp)
 {
        struct rdt_resource *r;
 
        for_each_alloc_enabled_rdt_resource(r) {
-               if (!strcmp(resname, r->name) && closid < r->num_closid)
-                       return parse_line(tok, r);
+               if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
+                       return parse_line(tok, r, rdtgrp);
        }
        rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
        return -EINVAL;
@@ -239,7 +302,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
        struct rdt_domain *dom;
        struct rdt_resource *r;
        char *tok, *resname;
-       int closid, ret = 0;
+       int ret = 0;
 
        /* Valid input requires a trailing newline */
        if (nbytes == 0 || buf[nbytes - 1] != '\n')
@@ -253,7 +316,15 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
        }
        rdt_last_cmd_clear();
 
-       closid = rdtgrp->closid;
+       /*
+        * No changes to pseudo-locked region allowed. It has to be removed
+        * and re-created instead.
+        */
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+               ret = -EINVAL;
+               rdt_last_cmd_puts("resource group is pseudo-locked\n");
+               goto out;
+       }
 
        for_each_alloc_enabled_rdt_resource(r) {
                list_for_each_entry(dom, &r->domains, list)
@@ -272,17 +343,27 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                        ret = -EINVAL;
                        goto out;
                }
-               ret = rdtgroup_parse_resource(resname, tok, closid);
+               ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
                if (ret)
                        goto out;
        }
 
        for_each_alloc_enabled_rdt_resource(r) {
-               ret = update_domains(r, closid);
+               ret = update_domains(r, rdtgrp->closid);
                if (ret)
                        goto out;
        }
 
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+               /*
+                * If pseudo-locking fails we keep the resource group in
+                * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
+                * active and updated for just the domain the pseudo-locked
+                * region was requested for.
+                */
+               ret = rdtgroup_pseudo_lock_create(rdtgrp);
+       }
+
 out:
        rdtgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
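
For reference, rdtgroup_schemata_write() consumes lines of the form "resource:id=value[;id=value]" with the mandatory trailing newline checked above, and the reason for a rejected write lands in info/last_cmd_status. A minimal user-space sketch, assuming a resource group named "p0" and illustrative L3 masks:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* L3 domain 0 gets CBM 0xf, domain 1 gets CBM 0xf0. */
	const char *line = "L3:0=f;1=f0\n";	/* trailing newline is required */
	int fd = open("/sys/fs/resctrl/p0/schemata", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, line, strlen(line)) < 0)
		return 1;	/* see info/last_cmd_status for the reason */
	close(fd);
	return 0;
}
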
@@ -318,10 +399,18 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp) {
-               closid = rdtgrp->closid;
-               for_each_alloc_enabled_rdt_resource(r) {
-                       if (closid < r->num_closid)
-                               show_doms(s, r, closid);
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                       for_each_alloc_enabled_rdt_resource(r)
+                               seq_printf(s, "%s:uninitialized\n", r->name);
+               } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+                       seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
+                                  rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+               } else {
+                       closid = rdtgrp->closid;
+                       for_each_alloc_enabled_rdt_resource(r) {
+                               if (closid < r->num_closid)
+                                       show_doms(s, r, closid);
+                       }
                }
        } else {
                ret = -ENOENT;
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
new file mode 100644 (file)
index 0000000..40f3903
--- /dev/null
@@ -0,0 +1,1522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre <reinette.chatre@intel.com>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/mman.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/intel-family.h>
+#include <asm/intel_rdt_sched.h>
+#include <asm/perf_event.h>
+
+#include "intel_rdt.h"
+
+#define CREATE_TRACE_POINTS
+#include "intel_rdt_pseudo_lock_event.h"
+
+/*
+ * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
+ * prefetcher state. Details about this register can be found in the MSR
+ * tables for specific platforms found in Intel's SDM.
+ */
+#define MSR_MISC_FEATURE_CONTROL       0x000001a4
+
+/*
+ * The bits needed to disable hardware prefetching varies based on the
+ * platform. During initialization we will discover which bits to use.
+ */
+static u64 prefetch_disable_bits;
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+static struct class *pseudo_lock_class;
+
+/**
+ * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ *
+ * Capture the list of platforms that have been validated to support
+ * pseudo-locking. This includes testing to ensure pseudo-locked regions
+ * with low cache miss rates can be created under a variety of load conditions,
+ * and that these pseudo-locked regions can maintain their low cache miss
+ * rates under a variety of load conditions for significant lengths of time.
+ *
+ * After a platform has been validated to support pseudo-locking its
+ * hardware prefetch disable bits are included here as they are documented
+ * in the SDM.
+ *
+ * When adding a platform here, also add support for its cache events to
+ * measure_cycles_perf_fn().
+ *
+ * Return:
+ * If platform is supported, the bits to disable hardware prefetchers, 0
+ * if platform is not supported.
+ */
+static u64 get_prefetch_disable_bits(void)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_data.x86 != 6)
+               return 0;
+
+       switch (boot_cpu_data.x86_model) {
+       case INTEL_FAM6_BROADWELL_X:
+               /*
+                * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+                * as:
+                * 0    L2 Hardware Prefetcher Disable (R/W)
+                * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
+                * 2    DCU Hardware Prefetcher Disable (R/W)
+                * 3    DCU IP Prefetcher Disable (R/W)
+                * 63:4 Reserved
+                */
+               return 0xF;
+       case INTEL_FAM6_ATOM_GOLDMONT:
+       case INTEL_FAM6_ATOM_GEMINI_LAKE:
+               /*
+                * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+                * as:
+                * 0     L2 Hardware Prefetcher Disable (R/W)
+                * 1     Reserved
+                * 2     DCU Hardware Prefetcher Disable (R/W)
+                * 63:3  Reserved
+                */
+               return 0x5;
+       }
+
+       return 0;
+}
+
+/*
+ * Helper to write a 64-bit value to an MSR without tracing. Used when
+ * use of the cache should be restricted and use of registers used
+ * for local variables avoided.
+ */
+static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
+{
+       __wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+}
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int pseudo_lock_minor_get(unsigned int *minor)
+{
+       unsigned long first_bit;
+
+       first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
+
+       if (first_bit == MINORBITS)
+               return -ENOSPC;
+
+       __clear_bit(first_bit, &pseudo_lock_minor_avail);
+       *minor = first_bit;
+
+       return 0;
+}
+
+/**
+ * pseudo_lock_minor_release - Return minor number to available
+ * @minor: The minor number made available
+ */
+static void pseudo_lock_minor_release(unsigned int minor)
+{
+       __set_bit(minor, &pseudo_lock_minor_avail);
+}
+
+/**
+ * region_find_by_minor - Locate a pseudo-lock region by inode minor number
+ * @minor: The minor number of the device representing pseudo-locked region
+ *
+ * When the character device is accessed we need to determine which
+ * pseudo-locked region it belongs to. This is done by matching the minor
+ * number of the device to the pseudo-locked region it belongs to.
+ *
+ * Minor numbers are assigned at the time a pseudo-locked region is associated
+ * with a cache instance.
+ *
+ * Return: On success return pointer to resource group owning the pseudo-locked
+ *         region, NULL on failure.
+ */
+static struct rdtgroup *region_find_by_minor(unsigned int minor)
+{
+       struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
+
+       list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+               if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
+                       rdtgrp_match = rdtgrp;
+                       break;
+               }
+       }
+       return rdtgrp_match;
+}
+
+/**
+ * pseudo_lock_pm_req - A power management QoS request list entry
+ * @list:      Entry within the @pm_reqs list for a pseudo-locked region
+ * @req:       PM QoS request
+ */
+struct pseudo_lock_pm_req {
+       struct list_head list;
+       struct dev_pm_qos_request req;
+};
+
+static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
+{
+       struct pseudo_lock_pm_req *pm_req, *next;
+
+       list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
+               dev_pm_qos_remove_request(&pm_req->req);
+               list_del(&pm_req->list);
+               kfree(pm_req);
+       }
+}
+
+/**
+ * pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: pseudo-lock region requiring the C-state constraint
+ *
+ * To prevent the cache from being affected by power management, entering
+ * C6 has to be avoided. This is accomplished by requesting a latency
+ * requirement lower than the lowest C6 exit latency of all supported
+ * platforms as found in the cpuidle state tables in the intel_idle driver.
+ * At this time it is possible to do so with a single latency requirement
+ * for all supported platforms.
+ *
+ * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
+ * the ACPI latencies need to be considered while keeping in mind that C2
+ * may be set to map to deeper sleep states. In this case the latency
+ * requirement also needs to prevent entering C2.
+ */
+static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
+{
+       struct pseudo_lock_pm_req *pm_req;
+       int cpu;
+       int ret;
+
+       for_each_cpu(cpu, &plr->d->cpu_mask) {
+               pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
+               if (!pm_req) {
+                       rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+                       ret = -ENOMEM;
+                       goto out_err;
+               }
+               ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+                                            &pm_req->req,
+                                            DEV_PM_QOS_RESUME_LATENCY,
+                                            30);
+               if (ret < 0) {
+                       rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+                                           cpu);
+                       kfree(pm_req);
+                       ret = -1;
+                       goto out_err;
+               }
+               list_add(&pm_req->list, &plr->pm_reqs);
+       }
+
+       return 0;
+
+out_err:
+       pseudo_lock_cstates_relax(plr);
+       return ret;
+}
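
The 30 usec request above is the in-kernel counterpart of the per-device PM QoS resume latency attribute exposed in sysfs. A hedged sketch of the equivalent user-space constraint, assuming cpu1 belongs to the affected cache domain:

#include <fcntl.h>
#include <unistd.h>

/* Cap cpu1's acceptable resume latency at 30 usec, mirroring the
 * DEV_PM_QOS_RESUME_LATENCY requests added in the loop above. */
static int cap_resume_latency(void)
{
	int fd = open("/sys/devices/system/cpu/cpu1/power/pm_qos_resume_latency_us",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "30", 2) != 2) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
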
+
+/**
+ * pseudo_lock_region_clear - Reset pseudo-lock region data
+ * @plr: pseudo-lock region
+ *
+ * All content of the pseudo-locked region is reset - any memory allocated
+ * is freed.
+ *
+ * Return: void
+ */
+static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
+{
+       plr->size = 0;
+       plr->line_size = 0;
+       kfree(plr->kmem);
+       plr->kmem = NULL;
+       plr->r = NULL;
+       if (plr->d)
+               plr->d->plr = NULL;
+       plr->d = NULL;
+       plr->cbm = 0;
+       plr->debugfs_dir = NULL;
+}
+
+/**
+ * pseudo_lock_region_init - Initialize pseudo-lock region information
+ * @plr: pseudo-lock region
+ *
+ * Called after the user provided a schemata to be pseudo-locked. On entry
+ * the &struct pseudo_lock_region has already been initialized from the
+ * schemata with the resource, domain, and capacity bitmask. Here the
+ * information required for pseudo-locking is deduced from this data and
+ * the &struct pseudo_lock_region is initialized further. This includes:
+ * - size in bytes of the region to be pseudo-locked
+ * - cache line size to know the stride with which data needs to be accessed
+ *   to be pseudo-locked
+ * - a cpu associated with the cache instance on which the pseudo-locking
+ *   flow can be executed
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
+{
+       struct cpu_cacheinfo *ci;
+       int ret;
+       int i;
+
+       /* Pick the first cpu we find that is associated with the cache. */
+       plr->cpu = cpumask_first(&plr->d->cpu_mask);
+
+       if (!cpu_online(plr->cpu)) {
+               rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+                                   plr->cpu);
+               ret = -ENODEV;
+               goto out_region;
+       }
+
+       ci = get_cpu_cacheinfo(plr->cpu);
+
+       plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+
+       for (i = 0; i < ci->num_leaves; i++) {
+               if (ci->info_list[i].level == plr->r->cache_level) {
+                       plr->line_size = ci->info_list[i].coherency_line_size;
+                       return 0;
+               }
+       }
+
+       ret = -1;
+       rdt_last_cmd_puts("unable to determine cache line size\n");
+out_region:
+       pseudo_lock_region_clear(plr);
+       return ret;
+}
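
The size computed above via rdtgroup_cbm_to_size() is, conceptually, the fraction of the cache instance covered by the set CBM bits. A hedged sketch of that relationship (not the kernel's exact helper, which reads the cache size from cacheinfo):

#include <linux/bitmap.h>

/* Sketch: bytes of cache represented by @cbm, where the full cache of
 * @cache_size bytes is divided evenly over @cbm_len bitmask bits. */
static unsigned int cbm_to_size(unsigned int cache_size,
				unsigned int cbm_len, unsigned long cbm)
{
	return cache_size / cbm_len * bitmap_weight(&cbm, cbm_len);
}
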
+
+/**
+ * pseudo_lock_init - Initialize a pseudo-lock region
+ * @rdtgrp: resource group to which new pseudo-locked region will belong
+ *
+ * A pseudo-locked region is associated with a resource group. When this
+ * association is created the pseudo-locked region is initialized. The
+ * details of the pseudo-locked region are not known at this time so only
+ * allocation is done and association established.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_init(struct rdtgroup *rdtgrp)
+{
+       struct pseudo_lock_region *plr;
+
+       plr = kzalloc(sizeof(*plr), GFP_KERNEL);
+       if (!plr)
+               return -ENOMEM;
+
+       init_waitqueue_head(&plr->lock_thread_wq);
+       INIT_LIST_HEAD(&plr->pm_reqs);
+       rdtgrp->plr = plr;
+       return 0;
+}
+
+/**
+ * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
+ * @plr: pseudo-lock region
+ *
+ * Initialize the details required to set up the pseudo-locked region and
+ * allocate the contiguous memory that will be pseudo-locked to the cache.
+ *
+ * Return: 0 on success, <0 on failure.  Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
+{
+       int ret;
+
+       ret = pseudo_lock_region_init(plr);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * We do not yet support contiguous regions larger than
+        * KMALLOC_MAX_SIZE.
+        */
+       if (plr->size > KMALLOC_MAX_SIZE) {
+               rdt_last_cmd_puts("requested region exceeds maximum size\n");
+               ret = -E2BIG;
+               goto out_region;
+       }
+
+       plr->kmem = kzalloc(plr->size, GFP_KERNEL);
+       if (!plr->kmem) {
+               rdt_last_cmd_puts("unable to allocate memory\n");
+               ret = -ENOMEM;
+               goto out_region;
+       }
+
+       ret = 0;
+       goto out;
+out_region:
+       pseudo_lock_region_clear(plr);
+out:
+       return ret;
+}
+
+/**
+ * pseudo_lock_free - Free a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-locked region belonged
+ *
+ * The pseudo-locked region's resources have already been released, or
+ * were not yet created, at this point. Now it can be freed and
+ * disassociated from the resource group.
+ *
+ * Return: void
+ */
+static void pseudo_lock_free(struct rdtgroup *rdtgrp)
+{
+       pseudo_lock_region_clear(rdtgrp->plr);
+       kfree(rdtgrp->plr);
+       rdtgrp->plr = NULL;
+}
+
+/**
+ * pseudo_lock_fn - Load kernel memory into cache
+ * @_rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * This is the core pseudo-locking flow.
+ *
+ * First we ensure that the kernel memory cannot be found in the cache.
+ * Then, while taking care that there will be as little interference as
+ * possible, the memory to be loaded is accessed while the core is running
+ * with its class of service set to the bitmask of the pseudo-locked region.
+ * After this is complete no future CAT allocations will be allowed to
+ * overlap with this bitmask.
+ *
+ * Local register variables are utilized to ensure that the memory region
+ * to be locked is the only memory access made during the critical locking
+ * loop.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int pseudo_lock_fn(void *_rdtgrp)
+{
+       struct rdtgroup *rdtgrp = _rdtgrp;
+       struct pseudo_lock_region *plr = rdtgrp->plr;
+       u32 rmid_p, closid_p;
+       unsigned long i;
+#ifdef CONFIG_KASAN
+       /*
+        * The registers used for local register variables are also used
+        * when KASAN is active. When KASAN is active we use a regular
+        * variable to ensure we always use a valid pointer, but the cost
+        * is that this variable will enter the cache through evicting the
+        * memory we are trying to lock into the cache. Thus expect lower
+        * pseudo-locking success rate when KASAN is active.
+        */
+       unsigned int line_size;
+       unsigned int size;
+       void *mem_r;
+#else
+       register unsigned int line_size asm("esi");
+       register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+       register void *mem_r asm("rbx");
+#else
+       register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+       /*
+        * Make sure none of the allocated memory is cached. If it is we
+        * will get a cache hit in the loop below from outside of the
+        * pseudo-locked region.
+        * wbinvd (as opposed to clflush/clflushopt) is required to
+        * increase likelihood that allocated cache portion will be filled
+        * with associated memory.
+        */
+       native_wbinvd();
+
+       /*
+        * Always called with interrupts enabled. By disabling interrupts
+        * ensure that we will not be preempted during this critical section.
+        */
+       local_irq_disable();
+
+       /*
+        * Call wrmsr and rdmsr as directly as possible to avoid tracing
+        * clobbering local register variables or affecting cache accesses.
+        *
+        * Disable the hardware prefetcher so that when the end of the memory
+        * being pseudo-locked is reached the hardware will not read beyond
+        * the buffer and evict pseudo-locked memory read earlier from the
+        * cache.
+        */
+       __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+       closid_p = this_cpu_read(pqr_state.cur_closid);
+       rmid_p = this_cpu_read(pqr_state.cur_rmid);
+       mem_r = plr->kmem;
+       size = plr->size;
+       line_size = plr->line_size;
+       /*
+        * Critical section begin: start by writing the closid associated
+        * with the capacity bitmask of the cache region being
+        * pseudo-locked followed by reading of kernel memory to load it
+        * into the cache.
+        */
+       __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
+       /*
+        * Cache was flushed earlier. Now access kernel memory to read it
+        * into cache region associated with just activated plr->closid.
+        * Loop over data twice:
+        * - In first loop the cache region is shared with the page walker
+        *   as it populates the paging structure caches (including TLB).
+        * - In the second loop the paging structure caches are used and
+        *   cache region is populated with the memory being referenced.
+        */
+       for (i = 0; i < size; i += PAGE_SIZE) {
+               /*
+                * Add a barrier to prevent speculative execution of this
+                * loop reading beyond the end of the buffer.
+                */
+               rmb();
+               asm volatile("mov (%0,%1,1), %%eax\n\t"
+                       :
+                       : "r" (mem_r), "r" (i)
+                       : "%eax", "memory");
+       }
+       for (i = 0; i < size; i += line_size) {
+               /*
+                * Add a barrier to prevent speculative execution of this
+                * loop reading beyond the end of the buffer.
+                */
+               rmb();
+               asm volatile("mov (%0,%1,1), %%eax\n\t"
+                       :
+                       : "r" (mem_r), "r" (i)
+                       : "%eax", "memory");
+       }
+       /*
+        * Critical section end: restore closid with capacity bitmask that
+        * does not overlap with pseudo-locked region.
+        */
+       __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
+
+       /* Re-enable the hardware prefetcher(s) */
+       wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+       local_irq_enable();
+
+       plr->thread_done = 1;
+       wake_up_interruptible(&plr->lock_thread_wq);
+       return 0;
+}
+
+/**
+ * rdtgroup_monitor_in_progress - Test if monitoring in progress
+ * @rdtgrp: resource group being queried
+ *
+ * Return: 1 if monitor groups have been created for this resource
+ * group, 0 otherwise.
+ */
+static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
+{
+       return !list_empty(&rdtgrp->mon.crdtgrp_list);
+}
+
+/**
+ * rdtgroup_locksetup_user_restrict - Restrict user access to group
+ * @rdtgrp: resource group needing access restricted
+ *
+ * A resource group used for cache pseudo-locking cannot have cpus or tasks
+ * assigned to it. This is communicated to the user by restricting access
+ * to all the files that can be used to make such changes.
+ *
+ * Permissions restored with rdtgroup_locksetup_user_restore()
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restriction of access an attempt will be made to restore permissions but
+ * the state of the mode of these files will be uncertain when a failure
+ * occurs.
+ */
+static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
+{
+       int ret;
+
+       ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+       if (ret)
+               return ret;
+
+       ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+       if (ret)
+               goto err_tasks;
+
+       ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+       if (ret)
+               goto err_cpus;
+
+       if (rdt_mon_capable) {
+               ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
+               if (ret)
+                       goto err_cpus_list;
+       }
+
+       ret = 0;
+       goto out;
+
+err_cpus_list:
+       rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+err_cpus:
+       rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+err_tasks:
+       rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+out:
+       return ret;
+}
+
+/**
+ * rdtgroup_locksetup_user_restore - Restore user access to group
+ * @rdtgrp: resource group needing access restored
+ *
+ * Restore all file access previously removed using
+ * rdtgroup_locksetup_user_restrict()
+ *
+ * Return: 0 on success, <0 on failure.  If a failure occurs during the
+ * restoration of access an attempt will be made to restrict permissions
+ * again but the state of the mode of these files will be uncertain when
+ * a failure occurs.
+ */
+static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
+{
+       int ret;
+
+       ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+       if (ret)
+               return ret;
+
+       ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+       if (ret)
+               goto err_tasks;
+
+       ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+       if (ret)
+               goto err_cpus;
+
+       if (rdt_mon_capable) {
+               ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
+               if (ret)
+                       goto err_cpus_list;
+       }
+
+       ret = 0;
+       goto out;
+
+err_cpus_list:
+       rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+err_cpus:
+       rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+err_tasks:
+       rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+out:
+       return ret;
+}
+
+/**
+ * rdtgroup_locksetup_enter - Resource group enters locksetup mode
+ * @rdtgrp: resource group requested to enter locksetup mode
+ *
+ * A resource group enters locksetup mode to reflect that it would be used
+ * to represent a pseudo-locked region and is in the process of being set
+ * up to do so. A resource group used for a pseudo-locked region would
+ * lose the closid associated with it, so we cannot allow it to have any
+ * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
+ * future. Monitoring of a pseudo-locked region is not allowed either.
+ *
+ * The above and more restrictions on a pseudo-locked region are checked
+ * for and enforced before the resource group enters the locksetup mode.
+ *
+ * Returns: 0 if the resource group successfully entered locksetup mode, <0
+ * on failure. On failure the last_cmd_status buffer is updated with text to
+ * communicate details of failure to the user.
+ */
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
+{
+       int ret;
+
+       /*
+        * The default resource group can neither be removed nor lose the
+        * default closid associated with it.
+        */
+       if (rdtgrp == &rdtgroup_default) {
+               rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Cache Pseudo-locking not supported when CDP is enabled.
+        *
+        * Some things to consider if you would like to enable this
+        * support (using L3 CDP as example):
+        * - When CDP is enabled two separate resources are exposed,
+        *   L3DATA and L3CODE, but they are actually on the same cache.
+        *   The implication for pseudo-locking is that if a
+        *   pseudo-locked region is created on a domain of one
+        *   resource (eg. L3CODE), then a pseudo-locked region cannot
+        *   be created on that same domain of the other resource
+        *   (eg. L3DATA). This is because the creation of a
+        *   pseudo-locked region involves a call to wbinvd that will
+        *   affect all cache allocations on that particular domain.
+        * - Considering the previous, it may be possible to only
+        *   expose one of the CDP resources to pseudo-locking and
+        *   hide the other. For example, we could consider only
+        *   exposing L3DATA; since the L3 cache is unified it is
+        *   still possible to place instructions there and execute them.
+        * - If only one region is exposed to pseudo-locking we should
+        *   still keep in mind that availability of a portion of cache
+        *   for pseudo-locking should take into account both resources.
+        *   Similarly, if a pseudo-locked region is created in one
+        *   resource, the portion of cache used by it should be made
+        *   unavailable to all future allocations from both resources.
+        */
+       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
+           rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+               rdt_last_cmd_puts("CDP enabled\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Not knowing the bits to disable prefetching implies that this
+        * platform does not support Cache Pseudo-Locking.
+        */
+       prefetch_disable_bits = get_prefetch_disable_bits();
+       if (prefetch_disable_bits == 0) {
+               rdt_last_cmd_puts("pseudo-locking not supported\n");
+               return -EINVAL;
+       }
+
+       if (rdtgroup_monitor_in_progress(rdtgrp)) {
+               rdt_last_cmd_puts("monitoring in progress\n");
+               return -EINVAL;
+       }
+
+       if (rdtgroup_tasks_assigned(rdtgrp)) {
+               rdt_last_cmd_puts("tasks assigned to resource group\n");
+               return -EINVAL;
+       }
+
+       if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+               rdt_last_cmd_puts("CPUs assigned to resource group\n");
+               return -EINVAL;
+       }
+
+       if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+               rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+               return -EIO;
+       }
+
+       ret = pseudo_lock_init(rdtgrp);
+       if (ret) {
+               rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+               goto out_release;
+       }
+
+       /*
+        * If this system is capable of monitoring, an RMID would have been
+        * allocated when the control group was created. It is no longer
+        * needed once this group is used for pseudo-locking. This is safe
+        * to call on platforms not capable of monitoring.
+        */
+       free_rmid(rdtgrp->mon.rmid);
+
+       ret = 0;
+       goto out;
+
+out_release:
+       rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+       return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - Resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+       int ret;
+
+       if (rdt_mon_capable) {
+               ret = alloc_rmid();
+               if (ret < 0) {
+                       rdt_last_cmd_puts("out of RMIDs\n");
+                       return ret;
+               }
+               rdtgrp->mon.rmid = ret;
+       }
+
+       ret = rdtgroup_locksetup_user_restore(rdtgrp);
+       if (ret) {
+               free_rmid(rdtgrp->mon.rmid);
+               return ret;
+       }
+
+       pseudo_lock_free(rdtgrp);
+       return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
+ * @d: RDT domain
+ * @_cbm: CBM to test
+ *
+ * @d represents a cache instance and @_cbm a capacity bitmask that is
+ * considered for it. Determine if @_cbm overlaps with any existing
+ * pseudo-locked region on @d.
+ *
+ * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * otherwise.
+ */
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+{
+       unsigned long *cbm = (unsigned long *)&_cbm;
+       unsigned long *cbm_b;
+       unsigned int cbm_len;
+
+       if (d->plr) {
+               cbm_len = d->plr->r->cache.cbm_len;
+               cbm_b = (unsigned long *)&d->plr->cbm;
+               if (bitmap_intersects(cbm, cbm_b, cbm_len))
+                       return true;
+       }
+       return false;
+}
+
+/**
+ * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
+ * @d: RDT domain under test
+ *
+ * The setup of a pseudo-locked region affects all cache instances within
+ * the hierarchy of the region. It is thus essential to know if any
+ * pseudo-locked regions exist within a cache hierarchy to prevent any
+ * attempts to create new pseudo-locked regions in the same hierarchy.
+ *
+ * Return: true if a pseudo-locked region exists in the hierarchy of @d or
+ *         if it is not possible to test due to a memory allocation issue,
+ *         false otherwise.
+ */
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+{
+       cpumask_var_t cpu_with_psl;
+       struct rdt_resource *r;
+       struct rdt_domain *d_i;
+       bool ret = false;
+
+       if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
+               return true;
+
+       /*
+        * First determine which cpus have pseudo-locked regions
+        * associated with them.
+        */
+       for_each_alloc_enabled_rdt_resource(r) {
+               list_for_each_entry(d_i, &r->domains, list) {
+                       if (d_i->plr)
+                               cpumask_or(cpu_with_psl, cpu_with_psl,
+                                          &d_i->cpu_mask);
+               }
+       }
+
+       /*
+        * Next test if new pseudo-locked region would intersect with
+        * existing region.
+        */
+       if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+               ret = true;
+
+       free_cpumask_var(cpu_with_psl);
+       return ret;
+}
+
+/**
+ * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * @_plr: pseudo-lock region to measure
+ *
+ * There is no deterministic way to test if a memory region is cached. One
+ * way is to measure how long it takes to read the memory, the speed of
+ * access is a good way to learn how close to the cpu the data was. Even
+ * more, if the prefetcher is disabled and the memory is read at a stride
+ * of half the cache line, then a cache miss will be easy to spot since the
+ * read of the first half would be significantly slower than the read of
+ * the second half.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int measure_cycles_lat_fn(void *_plr)
+{
+       struct pseudo_lock_region *plr = _plr;
+       unsigned long i;
+       u64 start, end;
+#ifdef CONFIG_KASAN
+       /*
+        * The registers used for local register variables are also used
+        * when KASAN is active. When KASAN is active we use a regular
+        * variable to ensure we always use a valid pointer to access memory.
+        * The cost is that accessing this pointer, which could be in
+        * cache, will be included in the measurement of memory read latency.
+        */
+       void *mem_r;
+#else
+#ifdef CONFIG_X86_64
+       register void *mem_r asm("rbx");
+#else
+       register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+       local_irq_disable();
+       /*
+        * The wrmsr call may be reordered with the assignment below it.
+        * Call wrmsr as directly as possible to avoid tracing clobbering
+        * the local register variable used for the memory pointer.
+        */
+       __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+       mem_r = plr->kmem;
+       /*
+        * Dummy execute of the time measurement to load the needed
+        * instructions into the L1 instruction cache.
+        */
+       start = rdtsc_ordered();
+       for (i = 0; i < plr->size; i += 32) {
+               start = rdtsc_ordered();
+               asm volatile("mov (%0,%1,1), %%eax\n\t"
+                            :
+                            : "r" (mem_r), "r" (i)
+                            : "%eax", "memory");
+               end = rdtsc_ordered();
+               trace_pseudo_lock_mem_latency((u32)(end - start));
+       }
+       wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+       local_irq_enable();
+       plr->thread_done = 1;
+       wake_up_interruptible(&plr->lock_thread_wq);
+       return 0;
+}
+
+static int measure_cycles_perf_fn(void *_plr)
+{
+       unsigned long long l3_hits = 0, l3_miss = 0;
+       u64 l3_hit_bits = 0, l3_miss_bits = 0;
+       struct pseudo_lock_region *plr = _plr;
+       unsigned long long l2_hits, l2_miss;
+       u64 l2_hit_bits, l2_miss_bits;
+       unsigned long i;
+#ifdef CONFIG_KASAN
+       /*
+        * The registers used for local register variables are also used
+        * when KASAN is active. When KASAN is active we use regular variables
+        * at the cost of including cache access latency to these variables
+        * in the measurements.
+        */
+       unsigned int line_size;
+       unsigned int size;
+       void *mem_r;
+#else
+       register unsigned int line_size asm("esi");
+       register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+       register void *mem_r asm("rbx");
+#else
+       register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+       /*
+        * Non-architectural event for the Goldmont Microarchitecture
+        * from Intel x86 Architecture Software Developer Manual (SDM):
+        * MEM_LOAD_UOPS_RETIRED D1H (event number)
+        * Umask values:
+        *     L1_HIT   01H
+        *     L2_HIT   02H
+        *     L1_MISS  08H
+        *     L2_MISS  10H
+        *
+        * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
+        * has two "no fix" errata associated with it: BDM35 and BDM100. On
+        * this platform we use the following events instead:
+        *  L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
+        *       REFERENCES FFH
+        *       MISS       3FH
+        *  LONGEST_LAT_CACHE 2EH (Documented in SDM)
+        *       REFERENCE 4FH
+        *       MISS      41H
+        */
+
+       /*
+        * Start by setting flags for IA32_PERFEVTSELx:
+        *     OS  (Operating system mode)  0x2
+        *     INT (APIC interrupt enable)  0x10
+        *     EN  (Enable counter)         0x40
+        *
+        * Then add the Umask value and event number to select performance
+        * event.
+        */
+
+       switch (boot_cpu_data.x86_model) {
+       case INTEL_FAM6_ATOM_GOLDMONT:
+       case INTEL_FAM6_ATOM_GEMINI_LAKE:
+               l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
+               l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
+               break;
+       case INTEL_FAM6_BROADWELL_X:
+               /* On BDW the l2_hit_bits count references, not hits */
+               l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
+               l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
+               /* On BDW the l3_hit_bits count references, not hits */
+               l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
+               l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
+               break;
+       default:
+               goto out;
+       }
+
+       local_irq_disable();
+       /*
+        * Call wrmsr directly to prevent the local register variables from
+        * being overwritten due to reordering of their assignment with
+        * the wrmsr calls.
+        */
+       __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+       /* Disable events and reset counters */
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
+       if (l3_hit_bits > 0) {
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
+       }
+       /* Set and enable the L2 counters */
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
+       if (l3_hit_bits > 0) {
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+                                     l3_hit_bits);
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+                                     l3_miss_bits);
+       }
+       mem_r = plr->kmem;
+       size = plr->size;
+       line_size = plr->line_size;
+       for (i = 0; i < size; i += line_size) {
+               asm volatile("mov (%0,%1,1), %%eax\n\t"
+                            :
+                            : "r" (mem_r), "r" (i)
+                            : "%eax", "memory");
+       }
+       /*
+        * Call wrmsr directly (no tracing) to not influence
+        * the cache access counters as they are disabled.
+        */
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
+                             l2_hit_bits & ~(0x40ULL << 16));
+       pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
+                             l2_miss_bits & ~(0x40ULL << 16));
+       if (l3_hit_bits > 0) {
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+                                     l3_hit_bits & ~(0x40ULL << 16));
+               pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+                                     l3_miss_bits & ~(0x40ULL << 16));
+       }
+       l2_hits = native_read_pmc(0);
+       l2_miss = native_read_pmc(1);
+       if (l3_hit_bits > 0) {
+               l3_hits = native_read_pmc(2);
+               l3_miss = native_read_pmc(3);
+       }
+       wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+       local_irq_enable();
+       /*
+        * On BDW we count references and misses, which need to be adjusted.
+        * Sometimes the "hits" counter is a bit more than the references,
+        * for example, x references but x + 1 hits. To not report invalid
+        * hit values in this case we treat the misses as equal to the
+        * references.
+        */
+       if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+               l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
+       trace_pseudo_lock_l2(l2_hits, l2_miss);
+       if (l3_hit_bits > 0) {
+               if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+                       l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
+               trace_pseudo_lock_l3(l3_hits, l3_miss);
+       }
+
+out:
+       plr->thread_done = 1;
+       wake_up_interruptible(&plr->lock_thread_wq);
+       return 0;
+}
+
+/**
+ * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ * @sel: selector of which measurement to perform on the pseudo-locked region
+ *
+ * The measurement of latency to access a pseudo-locked region should be
+ * done from a cpu that is associated with that pseudo-locked region.
+ * Determine which cpu is associated with this region and start a thread on
+ * that cpu to perform the measurement, then wait for that thread to complete.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
+{
+       struct pseudo_lock_region *plr = rdtgrp->plr;
+       struct task_struct *thread;
+       unsigned int cpu;
+       int ret = -1;
+
+       cpus_read_lock();
+       mutex_lock(&rdtgroup_mutex);
+
+       if (rdtgrp->flags & RDT_DELETED) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       plr->thread_done = 0;
+       cpu = cpumask_first(&plr->d->cpu_mask);
+       if (!cpu_online(cpu)) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       if (sel == 1)
+               thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
+                                               cpu_to_node(cpu),
+                                               "pseudo_lock_measure/%u",
+                                               cpu);
+       else if (sel == 2)
+               thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
+                                               cpu_to_node(cpu),
+                                               "pseudo_lock_measure/%u",
+                                               cpu);
+       else
+               goto out;
+
+       if (IS_ERR(thread)) {
+               ret = PTR_ERR(thread);
+               goto out;
+       }
+       kthread_bind(thread, cpu);
+       wake_up_process(thread);
+
+       ret = wait_event_interruptible(plr->lock_thread_wq,
+                                      plr->thread_done == 1);
+       if (ret < 0)
+               goto out;
+
+       ret = 0;
+
+out:
+       mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
+       return ret;
+}
+
+static ssize_t pseudo_lock_measure_trigger(struct file *file,
+                                          const char __user *user_buf,
+                                          size_t count, loff_t *ppos)
+{
+       struct rdtgroup *rdtgrp = file->private_data;
+       size_t buf_size;
+       char buf[32];
+       int ret;
+       int sel;
+
+       buf_size = min(count, (sizeof(buf) - 1));
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       ret = kstrtoint(buf, 10, &sel);
+       if (ret == 0) {
+               if (sel != 1)
+                       return -EINVAL;
+               ret = debugfs_file_get(file->f_path.dentry);
+               if (ret)
+                       return ret;
+               ret = pseudo_lock_measure_cycles(rdtgrp, sel);
+               if (ret == 0)
+                       ret = count;
+               debugfs_file_put(file->f_path.dentry);
+       }
+
+       return ret;
+}
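
Putting the debugfs plumbing together: writing "1" to a group's pseudo_lock_measure file runs measure_cycles_lat_fn() on a CPU of the pseudo-locked region, and the per-read latencies surface as resctrl:pseudo_lock_mem_latency trace events. A user-space sketch, assuming debugfs at /sys/kernel/debug and a hypothetical group named "p0":

#include <fcntl.h>
#include <unistd.h>

static int trigger_latency_measurement(void)
{
	/* "1" selects the latency measurement (measure_cycles_lat_fn). */
	int fd = open("/sys/kernel/debug/resctrl/p0/pseudo_lock_measure",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
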
+
+static const struct file_operations pseudo_measure_fops = {
+       .write = pseudo_lock_measure_trigger,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
+/**
+ * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * Called when a resource group in the pseudo-locksetup mode receives a
+ * valid schemata that should be pseudo-locked. Since the resource group is
+ * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
+ * allocated and initialized with the essential information. If a failure
+ * occurs the resource group remains in the pseudo-locksetup mode with the
+ * &struct pseudo_lock_region associated with it, but cleared of all
+ * information and ready for the user to re-attempt pseudo-locking by
+ * writing the schemata again.
+ *
+ * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
+ * on failure. Descriptive error will be written to last_cmd_status buffer.
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+       struct pseudo_lock_region *plr = rdtgrp->plr;
+       struct task_struct *thread;
+       unsigned int new_minor;
+       struct device *dev;
+       int ret;
+
+       ret = pseudo_lock_region_alloc(plr);
+       if (ret < 0)
+               return ret;
+
+       ret = pseudo_lock_cstates_constrain(plr);
+       if (ret < 0) {
+               ret = -EINVAL;
+               goto out_region;
+       }
+
+       plr->thread_done = 0;
+
+       thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
+                                       cpu_to_node(plr->cpu),
+                                       "pseudo_lock/%u", plr->cpu);
+       if (IS_ERR(thread)) {
+               ret = PTR_ERR(thread);
+               rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+               goto out_cstates;
+       }
+
+       kthread_bind(thread, plr->cpu);
+       wake_up_process(thread);
+
+       ret = wait_event_interruptible(plr->lock_thread_wq,
+                                      plr->thread_done == 1);
+       if (ret < 0) {
+               /*
+                * If the thread does not get on the CPU for whatever
+                * reason and the process which sets up the region is
+                * interrupted then this will leave the thread in runnable
+                * state and once it gets on the CPU it will dereference
+                * the cleared, but not freed, plr struct resulting in an
+                * empty pseudo-locking loop.
+                */
+               rdt_last_cmd_puts("locking thread interrupted\n");
+               goto out_cstates;
+       }
+
+       ret = pseudo_lock_minor_get(&new_minor);
+       if (ret < 0) {
+               rdt_last_cmd_puts("unable to obtain a new minor number\n");
+               goto out_cstates;
+       }
+
+       /*
+        * Unlock access but do not release the reference. The
+        * pseudo-locked region will still be here on return.
+        *
+        * The mutex has to be released temporarily to avoid a potential
+        * deadlock with the mm->mmap_sem semaphore which is obtained in
+        * the device_create() and debugfs_create_dir() callpath below
+        * as well as before the mmap() callback is called.
+        */
+       mutex_unlock(&rdtgroup_mutex);
+
+       if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+               plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+                                                     debugfs_resctrl);
+               if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+                       debugfs_create_file("pseudo_lock_measure", 0200,
+                                           plr->debugfs_dir, rdtgrp,
+                                           &pseudo_measure_fops);
+       }
+
+       dev = device_create(pseudo_lock_class, NULL,
+                           MKDEV(pseudo_lock_major, new_minor),
+                           rdtgrp, "%s", rdtgrp->kn->name);
+
+       mutex_lock(&rdtgroup_mutex);
+
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               rdt_last_cmd_printf("failed to create character device: %d\n",
+                                   ret);
+               goto out_debugfs;
+       }
+
+       /* We released the mutex - check if group was removed while we did so */
+       if (rdtgrp->flags & RDT_DELETED) {
+               ret = -ENODEV;
+               goto out_device;
+       }
+
+       plr->minor = new_minor;
+
+       rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+       closid_free(rdtgrp->closid);
+       rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+       rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+       ret = 0;
+       goto out;
+
+out_device:
+       device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+       debugfs_remove_recursive(plr->debugfs_dir);
+       pseudo_lock_minor_release(new_minor);
+out_cstates:
+       pseudo_lock_cstates_relax(plr);
+out_region:
+       pseudo_lock_region_clear(plr);
+out:
+       return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed via "rmdir" from user space or on the unmount of the
+ * resctrl filesystem. On removal the resource group does not go back to
+ * pseudo-locksetup mode before it is removed, instead it is removed
+ * directly. There is thus asymmetry with the creation where the
+ * &struct pseudo_lock_region is removed here while it was not created in
+ * rdtgroup_pseudo_lock_create().
+ *
+ * Return: void
+ */
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
+{
+       struct pseudo_lock_region *plr = rdtgrp->plr;
+
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+               /*
+                * Default group cannot be a pseudo-locked region so we can
+                * free closid here.
+                */
+               closid_free(rdtgrp->closid);
+               goto free;
+       }
+
+       pseudo_lock_cstates_relax(plr);
+       debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
+       device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+       pseudo_lock_minor_release(plr->minor);
+
+free:
+       pseudo_lock_free(rdtgrp);
+}
+
+static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
+{
+       struct rdtgroup *rdtgrp;
+
+       mutex_lock(&rdtgroup_mutex);
+
+       rdtgrp = region_find_by_minor(iminor(inode));
+       if (!rdtgrp) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENODEV;
+       }
+
+       filp->private_data = rdtgrp;
+       atomic_inc(&rdtgrp->waitcount);
+       /* Perform a non-seekable open - llseek is not supported */
+       filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+       mutex_unlock(&rdtgroup_mutex);
+
+       return 0;
+}
+
+static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
+{
+       struct rdtgroup *rdtgrp;
+
+       mutex_lock(&rdtgroup_mutex);
+       rdtgrp = filp->private_data;
+       WARN_ON(!rdtgrp);
+       if (!rdtgrp) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENODEV;
+       }
+       filp->private_data = NULL;
+       atomic_dec(&rdtgrp->waitcount);
+       mutex_unlock(&rdtgroup_mutex);
+       return 0;
+}
+
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+{
+       /* Not supported */
+       return -EINVAL;
+}
+
+static const struct vm_operations_struct pseudo_mmap_ops = {
+       .mremap = pseudo_lock_dev_mremap,
+};
+
+static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       unsigned long vsize = vma->vm_end - vma->vm_start;
+       unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+       struct pseudo_lock_region *plr;
+       struct rdtgroup *rdtgrp;
+       unsigned long physical;
+       unsigned long psize;
+
+       mutex_lock(&rdtgroup_mutex);
+
+       rdtgrp = filp->private_data;
+       WARN_ON(!rdtgrp);
+       if (!rdtgrp) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENODEV;
+       }
+
+       plr = rdtgrp->plr;
+
+       /*
+        * Task is required to run with affinity to the cpus associated
+        * with the pseudo-locked region. If this is not the case the task
+        * may be scheduled elsewhere and invalidate entries in the
+        * pseudo-locked region.
+        */
+       if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -EINVAL;
+       }
+
+       physical = __pa(plr->kmem) >> PAGE_SHIFT;
+       psize = plr->size - off;
+
+       if (off > plr->size) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENOSPC;
+       }
+
+       /*
+        * Ensure changes are carried directly to the memory being mapped,
+        * do not allow copy-on-write mapping.
+        */
+       if (!(vma->vm_flags & VM_SHARED)) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -EINVAL;
+       }
+
+       if (vsize > psize) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENOSPC;
+       }
+
+       memset(plr->kmem + off, 0, vsize);
+
+       if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+                           vsize, vma->vm_page_prot)) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -EAGAIN;
+       }
+       vma->vm_ops = &pseudo_mmap_ops;
+       mutex_unlock(&rdtgroup_mutex);
+       return 0;
+}
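
As a rough user-space sketch of how this mmap interface is meant to be exercised (the device path, CPU number and mapping size are assumptions, and error reporting is elided): the task first pins itself to a CPU of the region's cache domain, then maps the device with MAP_SHARED.

/*
 * Minimal sketch of a consumer of the pseudo-lock character device.
 * The device path, CPU number and mapping size are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        cpu_set_t cpus;
        size_t size = 4 * 1024 * 1024;  /* must not exceed the region size */
        void *mem;
        int fd;

        /* Affinity must be a subset of the region's cache domain CPUs. */
        CPU_ZERO(&cpus);
        CPU_SET(0, &cpus);
        if (sched_setaffinity(0, sizeof(cpus), &cpus))
                return EXIT_FAILURE;

        fd = open("/dev/pseudo_lock/newlock", O_RDWR);
        if (fd < 0)
                return EXIT_FAILURE;

        /* MAP_SHARED is required; copy-on-write mappings are rejected. */
        mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (mem == MAP_FAILED)
                return EXIT_FAILURE;

        /* Accesses through mem now hit the pseudo-locked cache region. */
        munmap(mem, size);
        close(fd);
        return EXIT_SUCCESS;
}
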
+
+static const struct file_operations pseudo_lock_dev_fops = {
+       .owner =        THIS_MODULE,
+       .llseek =       no_llseek,
+       .read =         NULL,
+       .write =        NULL,
+       .open =         pseudo_lock_dev_open,
+       .release =      pseudo_lock_dev_release,
+       .mmap =         pseudo_lock_dev_mmap,
+};
+
+static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
+{
+       struct rdtgroup *rdtgrp;
+
+       rdtgrp = dev_get_drvdata(dev);
+       if (mode)
+               *mode = 0600;
+       return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
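
Given this callback, a pseudo-locked region created as resource group "newlock" would typically surface (via devtmpfs/udev) as /dev/pseudo_lock/newlock with mode 0600, readable and writable only by root.
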
+
+int rdt_pseudo_lock_init(void)
+{
+       int ret;
+
+       ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+       if (ret < 0)
+               return ret;
+
+       pseudo_lock_major = ret;
+
+       pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
+       if (IS_ERR(pseudo_lock_class)) {
+               ret = PTR_ERR(pseudo_lock_class);
+               unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+               return ret;
+       }
+
+       pseudo_lock_class->devnode = pseudo_lock_devnode;
+       return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+       class_destroy(pseudo_lock_class);
+       pseudo_lock_class = NULL;
+       unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+       pseudo_lock_major = 0;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
new file mode 100644 (file)
index 0000000..2c041e6
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM resctrl
+
+#if !defined(_TRACE_PSEUDO_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PSEUDO_LOCK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pseudo_lock_mem_latency,
+           TP_PROTO(u32 latency),
+           TP_ARGS(latency),
+           TP_STRUCT__entry(__field(u32, latency)),
+           TP_fast_assign(__entry->latency = latency),
+           TP_printk("latency=%u", __entry->latency)
+          );
+
+TRACE_EVENT(pseudo_lock_l2,
+           TP_PROTO(u64 l2_hits, u64 l2_miss),
+           TP_ARGS(l2_hits, l2_miss),
+           TP_STRUCT__entry(__field(u64, l2_hits)
+                            __field(u64, l2_miss)),
+           TP_fast_assign(__entry->l2_hits = l2_hits;
+                          __entry->l2_miss = l2_miss;),
+           TP_printk("hits=%llu miss=%llu",
+                     __entry->l2_hits, __entry->l2_miss));
+
+TRACE_EVENT(pseudo_lock_l3,
+           TP_PROTO(u64 l3_hits, u64 l3_miss),
+           TP_ARGS(l3_hits, l3_miss),
+           TP_STRUCT__entry(__field(u64, l3_hits)
+                            __field(u64, l3_miss)),
+           TP_fast_assign(__entry->l3_hits = l3_hits;
+                          __entry->l3_miss = l3_miss;),
+           TP_printk("hits=%llu miss=%llu",
+                     __entry->l3_hits, __entry->l3_miss));
+
+#endif /* _TRACE_PSEUDO_LOCK_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#include <trace/define_trace.h>
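
Assuming a standard tracefs setup, these events would appear under events/resctrl/ in the tracing directory, so a measurement run could be inspected by enabling, for example, the pseudo_lock_mem_latency event and then reading the trace buffer.
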
index 749856a2e736738feab416e3980054e5b5a699b3..d6d7ea7349d016bf479ca9024889c448fadbacf7 100644 (file)
@@ -20,7 +20,9 @@
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
+#include <linux/cacheinfo.h>
 #include <linux/cpu.h>
+#include <linux/debugfs.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
 #include <linux/kernfs.h>
@@ -55,6 +57,8 @@ static struct kernfs_node *kn_mondata;
 static struct seq_buf last_cmd_status;
 static char last_cmd_status_buf[512];
 
+struct dentry *debugfs_resctrl;
+
 void rdt_last_cmd_clear(void)
 {
        lockdep_assert_held(&rdtgroup_mutex);
@@ -121,11 +125,65 @@ static int closid_alloc(void)
        return closid;
 }
 
-static void closid_free(int closid)
+void closid_free(int closid)
 {
        closid_free_map |= 1 << closid;
 }
 
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+static bool closid_allocated(unsigned int closid)
+{
+       return (closid_free_map & (1 << closid)) == 0;
+}
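
As a quick illustration of the free-map convention (a set bit means the closid is free): with closid_free_map == 0b1101, only closid 1 is in use, so closid_allocated(1) returns true while closids 0, 2 and 3 are reported free.
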
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid of the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+       struct rdtgroup *rdtgrp;
+
+       list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+               if (rdtgrp->closid == closid)
+                       return rdtgrp->mode;
+       }
+
+       return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+       [RDT_MODE_SHAREABLE]            = "shareable",
+       [RDT_MODE_EXCLUSIVE]            = "exclusive",
+       [RDT_MODE_PSEUDO_LOCKSETUP]     = "pseudo-locksetup",
+       [RDT_MODE_PSEUDO_LOCKED]        = "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgrp_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+       if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+               return "unknown";
+
+       return rdt_mode_str[mode];
+}
+
 /* set uid and gid of rdtgroup dirs and files to that of the creator */
 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
 {
@@ -207,8 +265,12 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
 
        if (rdtgrp) {
-               seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-                          cpumask_pr_args(&rdtgrp->cpu_mask));
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+                       seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+                                  cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
+               else
+                       seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+                                  cpumask_pr_args(&rdtgrp->cpu_mask));
        } else {
                ret = -ENOENT;
        }
@@ -394,6 +456,13 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
                goto unlock;
        }
 
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+           rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+               ret = -EINVAL;
+               rdt_last_cmd_puts("pseudo-locking in progress\n");
+               goto unlock;
+       }
+
        if (is_cpu_list(of))
                ret = cpulist_parse(buf, newmask);
        else
@@ -509,6 +578,32 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
        return ret;
 }
 
+/**
+ * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
+ * @r: Resource group
+ *
+ * Return: 1 if tasks have been assigned to @r, 0 otherwise
+ */
+int rdtgroup_tasks_assigned(struct rdtgroup *r)
+{
+       struct task_struct *p, *t;
+       int ret = 0;
+
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       rcu_read_lock();
+       for_each_process_thread(p, t) {
+               if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
+                   (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+                       ret = 1;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static int rdtgroup_task_write_permission(struct task_struct *task,
                                          struct kernfs_open_file *of)
 {
@@ -570,13 +665,22 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
        if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
                return -EINVAL;
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
        rdt_last_cmd_clear();
 
-       if (rdtgrp)
-               ret = rdtgroup_move_task(pid, rdtgrp, of);
-       else
-               ret = -ENOENT;
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+           rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+               ret = -EINVAL;
+               rdt_last_cmd_puts("pseudo-locking in progress\n");
+               goto unlock;
+       }
 
+       ret = rdtgroup_move_task(pid, rdtgrp, of);
+
+unlock:
        rdtgroup_kn_unlock(of->kn);
 
        return ret ?: nbytes;
@@ -662,6 +766,94 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
        return 0;
 }
 
+/**
+ * rdt_bit_usage_show - Display current usage of resources
+ *
+ * A domain is a shared resource whose capacity can now be allocated
+ * differently to each resource group. For each domain of this resource
+ * the current allocation bitmask is displayed, annotated as below to
+ * indicate the current usage of the corresponding bit:
+ *   0 - currently unused
+ *   X - currently available for sharing and used by software and hardware
+ *   H - currently used by hardware only but available for software use
+ *   S - currently used and shareable by software only
+ *   E - currently used exclusively by one resource group
+ *   P - currently pseudo-locked by one resource group
+ */
+static int rdt_bit_usage_show(struct kernfs_open_file *of,
+                             struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+       u32 sw_shareable = 0, hw_shareable = 0;
+       u32 exclusive = 0, pseudo_locked = 0;
+       struct rdt_domain *dom;
+       int i, hwb, swb, excl, psl;
+       enum rdtgrp_mode mode;
+       bool sep = false;
+       u32 *ctrl;
+
+       mutex_lock(&rdtgroup_mutex);
+       hw_shareable = r->cache.shareable_bits;
+       list_for_each_entry(dom, &r->domains, list) {
+               if (sep)
+                       seq_putc(seq, ';');
+               ctrl = dom->ctrl_val;
+               sw_shareable = 0;
+               exclusive = 0;
+               seq_printf(seq, "%d=", dom->id);
+               for (i = 0; i < r->num_closid; i++, ctrl++) {
+                       if (!closid_allocated(i))
+                               continue;
+                       mode = rdtgroup_mode_by_closid(i);
+                       switch (mode) {
+                       case RDT_MODE_SHAREABLE:
+                               sw_shareable |= *ctrl;
+                               break;
+                       case RDT_MODE_EXCLUSIVE:
+                               exclusive |= *ctrl;
+                               break;
+                       case RDT_MODE_PSEUDO_LOCKSETUP:
+                       /*
+                        * RDT_MODE_PSEUDO_LOCKSETUP is possible
+                        * here but not included since the CBM
+                        * associated with this CLOSID in this mode
+                        * is not initialized and no task or cpu can be
+                        * assigned this CLOSID.
+                        */
+                               break;
+                       case RDT_MODE_PSEUDO_LOCKED:
+                       case RDT_NUM_MODES:
+                               WARN(1,
+                                    "invalid mode for closid %d\n", i);
+                               break;
+                       }
+               }
+               for (i = r->cache.cbm_len - 1; i >= 0; i--) {
+                       pseudo_locked = dom->plr ? dom->plr->cbm : 0;
+                       hwb = test_bit(i, (unsigned long *)&hw_shareable);
+                       swb = test_bit(i, (unsigned long *)&sw_shareable);
+                       excl = test_bit(i, (unsigned long *)&exclusive);
+                       psl = test_bit(i, (unsigned long *)&pseudo_locked);
+                       if (hwb && swb)
+                               seq_putc(seq, 'X');
+                       else if (hwb && !swb)
+                               seq_putc(seq, 'H');
+                       else if (!hwb && swb)
+                               seq_putc(seq, 'S');
+                       else if (excl)
+                               seq_putc(seq, 'E');
+                       else if (psl)
+                               seq_putc(seq, 'P');
+                       else /* Unused bits remain */
+                               seq_putc(seq, '0');
+               }
+               sep = true;
+       }
+       seq_putc(seq, '\n');
+       mutex_unlock(&rdtgroup_mutex);
+       return 0;
+}
+
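
A hypothetical read of bit_usage for a two-domain cache resource with an 11-bit CBM might therefore print "0=XXSSSSSSSS0;1=XXSSSSSSSSE": one annotated character per CBM bit, most significant bit first, with domains separated by ';'.
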
 static int rdt_min_bw_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
@@ -740,6 +932,269 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
        return nbytes;
 }
 
+/*
+ * rdtgroup_mode_show - Display mode of this resource group
+ */
+static int rdtgroup_mode_show(struct kernfs_open_file *of,
+                             struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
+
+       seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
+
+       rdtgroup_kn_unlock(of->kn);
+       return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks whether the provided @cbm, intended to be used for @closid on
+ * domain @d, overlaps with the CBM of any other closid or with regions
+ * used directly by hardware in this domain. If @exclusive is true then
+ * only overlaps with resource groups in exclusive mode are considered.
+ * If @exclusive is false then overlaps with any resource group or
+ * hardware entity are considered.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+                          u32 _cbm, int closid, bool exclusive)
+{
+       unsigned long *cbm = (unsigned long *)&_cbm;
+       unsigned long *ctrl_b;
+       enum rdtgrp_mode mode;
+       u32 *ctrl;
+       int i;
+
+       /* Check for any overlap with regions used by hardware directly */
+       if (!exclusive) {
+               if (bitmap_intersects(cbm,
+                                     (unsigned long *)&r->cache.shareable_bits,
+                                     r->cache.cbm_len))
+                       return true;
+       }
+
+       /* Check for overlap with other resource groups */
+       ctrl = d->ctrl_val;
+       for (i = 0; i < r->num_closid; i++, ctrl++) {
+               ctrl_b = (unsigned long *)ctrl;
+               mode = rdtgroup_mode_by_closid(i);
+               if (closid_allocated(i) && i != closid &&
+                   mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+                       if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+                               if (exclusive) {
+                                       if (mode == RDT_MODE_EXCLUSIVE)
+                                               return true;
+                                       continue;
+                               }
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
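
For example (values hypothetical): a candidate CBM of 0x0f0 tested in a domain where another allocated closid holds 0x03c intersects on bits 4-5, so with @exclusive false the function returns true; with @exclusive true it returns true only if that other group is in exclusive mode.
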
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ *
+ * An exclusive resource group implies that there should be no sharing of
+ * its allocated resources. When a group is about to be made exclusive
+ * this test determines whether its current schemata supports the setting
+ * by checking for overlap with all other resource groups.
+ *
+ * Return: true if resource group can be exclusive, false if there is overlap
+ * with allocations of other resource groups and thus this resource group
+ * cannot be exclusive.
+ */
+static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
+{
+       int closid = rdtgrp->closid;
+       struct rdt_resource *r;
+       struct rdt_domain *d;
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               list_for_each_entry(d, &r->domains, list) {
+                       if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
+                                                 rdtgrp->closid, false))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+/**
+ * rdtgroup_mode_write - Modify the resource group's mode
+ *
+ */
+static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
+                                  char *buf, size_t nbytes, loff_t off)
+{
+       struct rdtgroup *rdtgrp;
+       enum rdtgrp_mode mode;
+       int ret = 0;
+
+       /* Valid input requires a trailing newline */
+       if (nbytes == 0 || buf[nbytes - 1] != '\n')
+               return -EINVAL;
+       buf[nbytes - 1] = '\0';
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
+
+       rdt_last_cmd_clear();
+
+       mode = rdtgrp->mode;
+
+       if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
+           (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
+           (!strcmp(buf, "pseudo-locksetup") &&
+            mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
+           (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
+               goto out;
+
+       if (mode == RDT_MODE_PSEUDO_LOCKED) {
+               rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (!strcmp(buf, "shareable")) {
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                       ret = rdtgroup_locksetup_exit(rdtgrp);
+                       if (ret)
+                               goto out;
+               }
+               rdtgrp->mode = RDT_MODE_SHAREABLE;
+       } else if (!strcmp(buf, "exclusive")) {
+               if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
+                       rdt_last_cmd_printf("schemata overlaps\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                       ret = rdtgroup_locksetup_exit(rdtgrp);
+                       if (ret)
+                               goto out;
+               }
+               rdtgrp->mode = RDT_MODE_EXCLUSIVE;
+       } else if (!strcmp(buf, "pseudo-locksetup")) {
+               ret = rdtgroup_locksetup_enter(rdtgrp);
+               if (ret)
+                       goto out;
+               rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
+       } else {
+               rdt_last_cmd_printf("unknown/unsupported mode\n");
+               ret = -EINVAL;
+       }
+
+out:
+       rdtgroup_kn_unlock(of->kn);
+       return ret ?: nbytes;
+}
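
A minimal user-space sketch of driving this mode file (the resctrl mount point and group name are assumptions):

/* Switch a hypothetical resource group "p0" to exclusive mode. */
#include <fcntl.h>
#include <unistd.h>

static int set_mode_exclusive(void)
{
        int fd = open("/sys/fs/resctrl/p0/mode", O_WRONLY);

        if (fd < 0)
                return -1;
        /* A trailing newline is required for valid input. */
        if (write(fd, "exclusive\n", 10) != 10) {
                close(fd);
                return -1;
        }
        return close(fd);
}
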
+
+/**
+ * rdtgroup_cbm_to_size - Translate CBM to size in bytes
+ * @r: RDT resource to which @d belongs.
+ * @d: RDT domain instance.
+ * @cbm: bitmask for which the size should be computed.
+ *
+ * The bitmask @cbm associated with the RDT domain instance @d is
+ * translated into the number of bytes it represents. The size in bytes is
+ * computed by first dividing the total cache size by the CBM length to
+ * determine how many bytes each bit in the bitmask represents. The result
+ * is multiplied with the number of bits set in the bitmask.
+ */
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
+                                 struct rdt_domain *d, u32 cbm)
+{
+       struct cpu_cacheinfo *ci;
+       unsigned int size = 0;
+       int num_b, i;
+
+       num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+       ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
+       for (i = 0; i < ci->num_leaves; i++) {
+               if (ci->info_list[i].level == r->cache_level) {
+                       size = ci->info_list[i].size / r->cache.cbm_len * num_b;
+                       break;
+               }
+       }
+
+       return size;
+}
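
Worked example under assumed numbers: for a 32 MiB cache with a 16-bit CBM, each bit represents 33554432 / 16 = 2097152 bytes (2 MiB), so a CBM of 0x00ff (8 bits set) translates to 16 MiB. Note that the integer division happens before the multiplication.
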
+
+/**
+ * rdtgroup_size_show - Display size in bytes of allocated regions
+ *
+ * The "size" file mirrors the layout of the "schemata" file, printing the
+ * size in bytes of each region instead of the capacity bitmask.
+ *
+ */
+static int rdtgroup_size_show(struct kernfs_open_file *of,
+                             struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+       struct rdt_domain *d;
+       unsigned int size;
+       bool sep = false;
+       u32 cbm;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
+
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+               seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
+               size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+                                           rdtgrp->plr->d,
+                                           rdtgrp->plr->cbm);
+               seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+               goto out;
+       }
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               seq_printf(s, "%*s:", max_name_width, r->name);
+               list_for_each_entry(d, &r->domains, list) {
+                       if (sep)
+                               seq_putc(s, ';');
+                       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+                               size = 0;
+                       } else {
+                               cbm = d->ctrl_val[rdtgrp->closid];
+                               size = rdtgroup_cbm_to_size(r, d, cbm);
+                       }
+                       seq_printf(s, "%d=%u", d->id, size);
+                       sep = true;
+               }
+               seq_putc(s, '\n');
+       }
+
+out:
+       rdtgroup_kn_unlock(of->kn);
+
+       return 0;
+}
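
On a hypothetical two-domain L3 system where each domain grants a 4 MiB allocation, reading this group's "size" file would yield a line such as "L3:0=4194304;1=4194304", mirroring the schemata layout with byte counts in place of bitmasks.
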
+
 /* rdtgroup information files for one cache resource. */
 static struct rftype res_common_files[] = {
        {
@@ -791,6 +1246,13 @@ static struct rftype res_common_files[] = {
                .seq_show       = rdt_shareable_bits_show,
                .fflags         = RF_CTRL_INFO | RFTYPE_RES_CACHE,
        },
+       {
+               .name           = "bit_usage",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_bit_usage_show,
+               .fflags         = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+       },
        {
                .name           = "min_bandwidth",
                .mode           = 0444,
@@ -853,6 +1315,22 @@ static struct rftype res_common_files[] = {
                .seq_show       = rdtgroup_schemata_show,
                .fflags         = RF_CTRL_BASE,
        },
+       {
+               .name           = "mode",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_mode_write,
+               .seq_show       = rdtgroup_mode_show,
+               .fflags         = RF_CTRL_BASE,
+       },
+       {
+               .name           = "size",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdtgroup_size_show,
+               .fflags         = RF_CTRL_BASE,
+       },
+
 };
 
 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
@@ -883,6 +1361,103 @@ error:
        return ret;
 }
 
+/**
+ * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ *
+ * The permissions of named resctrl file, directory, or link are modified
+ * to not allow read, write, or execute by any user.
+ *
+ * WARNING: This function is intended to communicate to the user that the
+ * resctrl file has been locked down - that it is not relevant to the
+ * particular state the system finds itself in. It should not be relied
+ * on to protect from user access because after the file's permissions
+ * are restricted the user can still change the permissions using chmod
+ * from the command line.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
+{
+       struct iattr iattr = {.ia_valid = ATTR_MODE,};
+       struct kernfs_node *kn;
+       int ret = 0;
+
+       kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+       if (!kn)
+               return -ENOENT;
+
+       switch (kernfs_type(kn)) {
+       case KERNFS_DIR:
+               iattr.ia_mode = S_IFDIR;
+               break;
+       case KERNFS_FILE:
+               iattr.ia_mode = S_IFREG;
+               break;
+       case KERNFS_LINK:
+               iattr.ia_mode = S_IFLNK;
+               break;
+       }
+
+       ret = kernfs_setattr(kn, &iattr);
+       kernfs_put(kn);
+       return ret;
+}
+
+/**
+ * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ * @mask: Mask of permissions that should be restored
+ *
+ * Restore the permissions of the named file. If @name is a directory the
+ * permissions of its parent will be used.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+                            umode_t mask)
+{
+       struct iattr iattr = {.ia_valid = ATTR_MODE,};
+       struct kernfs_node *kn, *parent;
+       struct rftype *rfts, *rft;
+       int ret, len;
+
+       rfts = res_common_files;
+       len = ARRAY_SIZE(res_common_files);
+
+       for (rft = rfts; rft < rfts + len; rft++) {
+               if (!strcmp(rft->name, name))
+                       iattr.ia_mode = rft->mode & mask;
+       }
+
+       kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+       if (!kn)
+               return -ENOENT;
+
+       switch (kernfs_type(kn)) {
+       case KERNFS_DIR:
+               parent = kernfs_get_parent(kn);
+               if (parent) {
+                       iattr.ia_mode |= parent->mode;
+                       kernfs_put(parent);
+               }
+               iattr.ia_mode |= S_IFDIR;
+               break;
+       case KERNFS_FILE:
+               iattr.ia_mode |= S_IFREG;
+               break;
+       case KERNFS_LINK:
+               iattr.ia_mode |= S_IFLNK;
+               break;
+       }
+
+       ret = kernfs_setattr(kn, &iattr);
+       kernfs_put(kn);
+       return ret;
+}
+
 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
                                      unsigned long fflags)
 {
@@ -1224,6 +1799,9 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
 
        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+                   rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+                       rdtgroup_pseudo_lock_remove(rdtgrp);
                kernfs_unbreak_active_protection(kn);
                kernfs_put(rdtgrp->kn);
                kfree(rdtgrp);
@@ -1289,10 +1867,16 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
                rdtgroup_default.mon.mon_data_kn = kn_mondata;
        }
 
+       ret = rdt_pseudo_lock_init();
+       if (ret) {
+               dentry = ERR_PTR(ret);
+               goto out_mondata;
+       }
+
        dentry = kernfs_mount(fs_type, flags, rdt_root,
                              RDTGROUP_SUPER_MAGIC, NULL);
        if (IS_ERR(dentry))
-               goto out_mondata;
+               goto out_psl;
 
        if (rdt_alloc_capable)
                static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
@@ -1310,6 +1894,8 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 
        goto out;
 
+out_psl:
+       rdt_pseudo_lock_release();
 out_mondata:
        if (rdt_mon_capable)
                kernfs_remove(kn_mondata);
@@ -1447,6 +2033,10 @@ static void rmdir_all_sub(void)
                if (rdtgrp == &rdtgroup_default)
                        continue;
 
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+                   rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+                       rdtgroup_pseudo_lock_remove(rdtgrp);
+
                /*
                 * Give any CPUs back to the default group. We cannot copy
                 * cpu_online_mask because a CPU might have executed the
@@ -1483,6 +2073,8 @@ static void rdt_kill_sb(struct super_block *sb)
                reset_all_ctrls(r);
        cdp_disable_all();
        rmdir_all_sub();
+       rdt_pseudo_lock_release();
+       rdtgroup_default.mode = RDT_MODE_SHAREABLE;
        static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
        static_branch_disable_cpuslocked(&rdt_mon_enable_key);
        static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -1682,6 +2274,114 @@ out_destroy:
        return ret;
 }
 
+/**
+ * cbm_ensure_valid - Enforce validity on provided CBM
+ * @_val:      Candidate CBM
+ * @r:         RDT resource to which the CBM belongs
+ *
+ * The provided CBM represents all cache portions available for use. This
+ * may be represented by a bitmap that does not consist of contiguous ones
+ * and thus be an invalid CBM.
+ * Here the provided CBM is forced to be a valid CBM by only considering
+ * the first set of contiguous bits as valid and clearing all remaining bits.
+ * The intention here is to provide a valid default CBM with which a new
+ * resource group is initialized. The user can follow this with a
+ * modification to the CBM if the default does not satisfy the
+ * requirements.
+ */
+static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
+{
+       /*
+        * Convert the u32 _val to an unsigned long required by all the bit
+        * operations within this function. No more than 32 bits of this
+        * converted value can be accessed because all bit operations are
+        * additionally provided with cbm_len that is initialized during
+        * hardware enumeration using five bits from the EAX register and
+        * thus can never exceed 32 bits.
+        */
+       unsigned long *val = (unsigned long *)_val;
+       unsigned int cbm_len = r->cache.cbm_len;
+       unsigned long first_bit, zero_bit;
+
+       if (*val == 0)
+               return;
+
+       first_bit = find_first_bit(val, cbm_len);
+       zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
+
+       /* Clear any remaining bits to ensure contiguous region */
+       bitmap_clear(val, zero_bit, cbm_len - zero_bit);
+}
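
For example, a candidate CBM of 0b1011 with a cbm_len of 4: the first set bit is bit 0, the next zero bit is bit 2, and bitmap_clear() removes bits 2-3, yielding the contiguous CBM 0b0011.
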
+
+/**
+ * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations. That is, all shareable and unused bits.
+ *
+ * An all-zero CBM is invalid. If there are no more shareable bits available
+ * on any domain then the entire allocation will fail.
+ */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+       u32 used_b = 0, unused_b = 0;
+       u32 closid = rdtgrp->closid;
+       struct rdt_resource *r;
+       enum rdtgrp_mode mode;
+       struct rdt_domain *d;
+       int i, ret;
+       u32 *ctrl;
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               list_for_each_entry(d, &r->domains, list) {
+                       d->have_new_ctrl = false;
+                       d->new_ctrl = r->cache.shareable_bits;
+                       used_b = r->cache.shareable_bits;
+                       ctrl = d->ctrl_val;
+                       for (i = 0; i < r->num_closid; i++, ctrl++) {
+                               if (closid_allocated(i) && i != closid) {
+                                       mode = rdtgroup_mode_by_closid(i);
+                                       if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+                                               break;
+                                       used_b |= *ctrl;
+                                       if (mode == RDT_MODE_SHAREABLE)
+                                               d->new_ctrl |= *ctrl;
+                               }
+                       }
+                       if (d->plr && d->plr->cbm > 0)
+                               used_b |= d->plr->cbm;
+                       unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+                       unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+                       d->new_ctrl |= unused_b;
+                       /*
+                        * Force the initial CBM to be valid, user can
+                        * modify the CBM based on system availability.
+                        */
+                       cbm_ensure_valid(&d->new_ctrl, r);
+                       if (bitmap_weight((unsigned long *) &d->new_ctrl,
+                                         r->cache.cbm_len) <
+                                       r->cache.min_cbm_bits) {
+                               rdt_last_cmd_printf("no space on %s:%d\n",
+                                                   r->name, d->id);
+                               return -ENOSPC;
+                       }
+                       d->have_new_ctrl = true;
+               }
+       }
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               ret = update_domains(r, rdtgrp->closid);
+               if (ret < 0) {
+                       rdt_last_cmd_puts("failed to initialize allocations\n");
+                       return ret;
+               }
+               rdtgrp->mode = RDT_MODE_SHAREABLE;
+       }
+
+       return 0;
+}
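
Worked example with assumed values: for an 8-bit CBM with hardware shareable_bits of 0x03 and one existing exclusive group holding 0xf0, used_b becomes 0xf3, unused_b becomes 0x0c, and the new group starts with 0x03 | 0x0c = 0x0f, the shareable plus unused bits, already contiguous so cbm_ensure_valid() leaves it untouched.
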
+
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
                             struct kernfs_node *prgrp_kn,
                             const char *name, umode_t mode,
@@ -1700,6 +2400,14 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
                goto out_unlock;
        }
 
+       if (rtype == RDTMON_GROUP &&
+           (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+            prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
+               ret = -EINVAL;
+               rdt_last_cmd_puts("pseudo-locking in progress\n");
+               goto out_unlock;
+       }
+
        /* allocate the rdtgroup. */
        rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
        if (!rdtgrp) {
@@ -1840,6 +2548,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
        ret = 0;
 
        rdtgrp->closid = closid;
+       ret = rdtgroup_init_alloc(rdtgrp);
+       if (ret < 0)
+               goto out_id_free;
+
        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
 
        if (rdt_mon_capable) {
@@ -1850,15 +2562,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
                ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
                if (ret) {
                        rdt_last_cmd_puts("kernfs subdir error\n");
-                       goto out_id_free;
+                       goto out_del_list;
                }
        }
 
        goto out_unlock;
 
+out_del_list:
+       list_del(&rdtgrp->rdtgroup_list);
 out_id_free:
        closid_free(closid);
-       list_del(&rdtgrp->rdtgroup_list);
 out_common_fail:
        mkdir_rdt_prepare_clean(rdtgrp);
 out_unlock:
@@ -1945,6 +2658,21 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
        return 0;
 }
 
+static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+                               struct rdtgroup *rdtgrp)
+{
+       rdtgrp->flags = RDT_DELETED;
+       list_del(&rdtgrp->rdtgroup_list);
+
+       /*
+        * one extra hold on this, will drop when we kfree(rdtgrp)
+        * in rdtgroup_kn_unlock()
+        */
+       kernfs_get(kn);
+       kernfs_remove(rdtgrp->kn);
+       return 0;
+}
+
 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
                               cpumask_var_t tmpmask)
 {
@@ -1970,7 +2698,6 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
        cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
        update_closid_rmid(tmpmask, NULL);
 
-       rdtgrp->flags = RDT_DELETED;
        closid_free(rdtgrp->closid);
        free_rmid(rdtgrp->mon.rmid);
 
@@ -1979,14 +2706,7 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
         */
        free_all_child_rdtgrp(rdtgrp);
 
-       list_del(&rdtgrp->rdtgroup_list);
-
-       /*
-        * one extra hold on this, will drop when we kfree(rdtgrp)
-        * in rdtgroup_kn_unlock()
-        */
-       kernfs_get(kn);
-       kernfs_remove(rdtgrp->kn);
+       rdtgroup_ctrl_remove(kn, rdtgrp);
 
        return 0;
 }
@@ -2014,13 +2734,19 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
         * If the rdtgroup is a mon group and parent directory
         * is a valid "mon_groups" directory, remove the mon group.
         */
-       if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
-               ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
-       else if (rdtgrp->type == RDTMON_GROUP &&
-                is_mon_groups(parent_kn, kn->name))
+       if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+                   rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+                       ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+               } else {
+                       ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+               }
+       } else if (rdtgrp->type == RDTMON_GROUP &&
+                is_mon_groups(parent_kn, kn->name)) {
                ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
-       else
+       } else {
                ret = -EPERM;
+       }
 
 out:
        rdtgroup_kn_unlock(kn);
@@ -2046,7 +2772,8 @@ static int __init rdtgroup_setup_root(void)
        int ret;
 
        rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
-                                     KERNFS_ROOT_CREATE_DEACTIVATED,
+                                     KERNFS_ROOT_CREATE_DEACTIVATED |
+                                     KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
                                      &rdtgroup_default);
        if (IS_ERR(rdt_root))
                return PTR_ERR(rdt_root);
@@ -2102,6 +2829,29 @@ int __init rdtgroup_init(void)
        if (ret)
                goto cleanup_mountpoint;
 
+       /*
+        * Adding the resctrl debugfs directory here may not be ideal since
+        * it would let the resctrl debugfs directory appear on the debugfs
+        * filesystem before the resctrl filesystem is mounted. That may
+        * also be acceptable since it enables debugging of RDT before
+        * resctrl is mounted.
+        * The debugfs directory is created here rather than in rdt_mount()
+        * because rdt_mount() takes rdtgroup_mutex and, during the debugfs
+        * directory creation, also &sb->s_type->i_mutex_key (the lockdep
+        * class of inode->i_rwsem), creating the dependency:
+        * rdtgroup_mutex --> &sb->s_type->i_mutex_key
+        * Other filesystem interactions (eg. SyS_getdents) have the ordering:
+        * &sb->s_type->i_mutex_key --> &mm->mmap_sem
+        * and during mmap(), called with &mm->mmap_sem held, rdtgroup_mutex
+        * is taken, creating the dependency:
+        * &mm->mmap_sem --> rdtgroup_mutex
+        * Together these dependencies form a cycle that could deadlock.
+        * Creating the debugfs directory here breaks the cycle (even
+        * though file operations cannot occur until the filesystem is
+        * mounted, there is no way to tell lockdep that).
+        */
+       debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
+
        return 0;
 
 cleanup_mountpoint:
@@ -2111,3 +2861,11 @@ cleanup_root:
 
        return ret;
 }
+
+void __exit rdtgroup_exit(void)
+{
+       debugfs_remove_recursive(debugfs_resctrl);
+       unregister_filesystem(&rdt_fs_type);
+       sysfs_remove_mount_point(fs_kobj, "resctrl");
+       kernfs_destroy_root(rdt_root);
+}
index 5bbd06f38ff68f58d1efc980db0fd9fc0af7d89a..f34d89c01edc5c761e0df331da1331f8a0f98f3a 100644 (file)
@@ -160,6 +160,11 @@ static struct severity {
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
                USER
                ),
+       MCESEV(
+               PANIC, "Data load in unrecoverable area of kernel",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+               KERNEL
+               ),
 #endif
        MCESEV(
                PANIC, "Action required: unknown MCACOD",
index e4cf6ff1c2e1d341bb5ca890cd8dba266ce2aa18..4b767284b7f5e59e529c5c7e1ae90174d7d23654 100644 (file)
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       /* We hope get_seconds stays lockless */
-       m->time = get_seconds();
+       /* need the internal __ version to avoid deadlocks */
+       m->time = __ktime_get_real_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       int i, ret = 0;
        char *tmp;
+       int i;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(msr_ops.status(i));
-               if (m->status & MCI_STATUS_VAL) {
-                       __set_bit(i, validp);
-                       if (quirk_no_way_out)
-                               quirk_no_way_out(i, m, regs);
-               }
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               __set_bit(i, validp);
+               if (quirk_no_way_out)
+                       quirk_no_way_out(i, m, regs);
 
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       mce_read_aux(m, i);
                        *msg = tmp;
-                       ret = 1;
+                       return 1;
                }
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -1102,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
 }
 #endif
 
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ *  skip those CPUs which remain looping in the 1st kernel - see
+ *  crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+       if (cpu_is_offline(cpu) ||
+           (crashing_cpu != -1 && crashing_cpu != cpu)) {
+               u64 mcgstatus;
+
+               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+               if (mcgstatus & MCG_STATUS_RIPV) {
+                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+                           unsigned long *toclear, unsigned long *valid_banks,
+                           int no_way_out, int *worst)
+{
+       struct mca_config *cfg = &mca_cfg;
+       int severity, i;
+
+       for (i = 0; i < cfg->banks; i++) {
+               __clear_bit(i, toclear);
+               if (!test_bit(i, valid_banks))
+                       continue;
+
+               if (!mce_banks[i].ctl)
+                       continue;
+
+               m->misc = 0;
+               m->addr = 0;
+               m->bank = i;
+
+               m->status = mce_rdmsrl(msr_ops.status(i));
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               /*
+                * Corrected or non-signaled errors are handled by
+                * machine_check_poll(). Leave them alone, unless this panics.
+                */
+               if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+                       !no_way_out)
+                       continue;
+
+               /* Set taint even when machine check was not enabled. */
+               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+               severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+               /*
+                * When machine check was for corrected/deferred handler don't
+                * touch, unless we're panicking.
+                */
+               if ((severity == MCE_KEEP_SEVERITY ||
+                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
+                       continue;
+
+               __set_bit(i, toclear);
+
+               /* Machine check event was not enabled. Clear, but ignore. */
+               if (severity == MCE_NO_SEVERITY)
+                       continue;
+
+               mce_read_aux(m, i);
+
+               /* assuming valid severity level != 0 */
+               m->severity = severity;
+
+               mce_log(m);
+
+               if (severity > *worst) {
+                       *final = *m;
+                       *worst = severity;
+               }
+       }
+
+       /* mce_clear_state will clear *final, save locally for use later */
+       *m = *final;
+}
+
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
@@ -1116,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
  */
 void do_machine_check(struct pt_regs *regs, long error_code)
 {
+       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        struct mca_config *cfg = &mca_cfg;
+       int cpu = smp_processor_id();
+       char *msg = "Unknown";
        struct mce m, *final;
-       int i;
        int worst = 0;
-       int severity;
 
        /*
         * Establish sequential order between the CPUs entering the machine
         * check handler.
         */
        int order = -1;
+
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
+
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
-       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
-       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
-       char *msg = "Unknown";
 
        /*
         * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
         * on Intel.
         */
        int lmce = 1;
-       int cpu = smp_processor_id();
-
-       /*
-        * Cases where we avoid rendezvous handler timeout:
-        * 1) If this CPU is offline.
-        *
-        * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
-        *  skip those CPUs which remain looping in the 1st kernel - see
-        *  crash_nmi_callback().
-        *
-        * Note: there still is a small window between kexec-ing and the new,
-        * kdump kernel establishing a new #MC handler where a broadcasted MCE
-        * might not get handled properly.
-        */
-       if (cpu_is_offline(cpu) ||
-           (crashing_cpu != -1 && crashing_cpu != cpu)) {
-               u64 mcgstatus;
 
-               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
-               if (mcgstatus & MCG_STATUS_RIPV) {
-                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-                       return;
-               }
-       }
+       if (__mc_check_crashing_cpu(cpu))
+               return;
 
        ist_enter(regs);
 
        this_cpu_inc(mce_exception_count);
 
-       if (!cfg->banks)
-               goto out;
-
        mce_gather_info(&m, regs);
        m.tsc = rdtsc();
 
@@ -1205,75 +1279,20 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                lmce = m.mcgstatus & MCG_STATUS_LMCES;
 
        /*
+        * Local machine check may already know that we have to panic.
+        * Broadcast machine check begins rendezvous in mce_start()
         * Go through all banks in exclusion of the other CPUs. This way we
         * don't report duplicated events on shared banks because the first one
-        * to see it will clear it. If this is a Local MCE, then no need to
-        * perform rendezvous.
+        * to see it will clear it.
         */
-       if (!lmce)
+       if (lmce) {
+               if (no_way_out)
+                       mce_panic("Fatal local machine check", &m, msg);
+       } else {
                order = mce_start(&no_way_out);
-
-       for (i = 0; i < cfg->banks; i++) {
-               __clear_bit(i, toclear);
-               if (!test_bit(i, valid_banks))
-                       continue;
-               if (!mce_banks[i].ctl)
-                       continue;
-
-               m.misc = 0;
-               m.addr = 0;
-               m.bank = i;
-
-               m.status = mce_rdmsrl(msr_ops.status(i));
-               if ((m.status & MCI_STATUS_VAL) == 0)
-                       continue;
-
-               /*
-                * Non uncorrected or non signaled errors are handled by
-                * machine_check_poll. Leave them alone, unless this panics.
-                */
-               if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
-                       !no_way_out)
-                       continue;
-
-               /*
-                * Set taint even when machine check was not enabled.
-                */
-               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
-               severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
-               /*
-                * When machine check was for corrected/deferred handler don't
-                * touch, unless we're panicing.
-                */
-               if ((severity == MCE_KEEP_SEVERITY ||
-                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
-                       continue;
-               __set_bit(i, toclear);
-               if (severity == MCE_NO_SEVERITY) {
-                       /*
-                        * Machine check event was not enabled. Clear, but
-                        * ignore.
-                        */
-                       continue;
-               }
-
-               mce_read_aux(&m, i);
-
-               /* assuming valid severity level != 0 */
-               m.severity = severity;
-
-               mce_log(&m);
-
-               if (severity > worst) {
-                       *final = m;
-                       worst = severity;
-               }
        }
 
-       /* mce_clear_state will clear *final, save locally for use later */
-       m = *final;
+       __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
 
        if (!no_way_out)
                mce_clear_state(toclear);
@@ -1287,12 +1306,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                        no_way_out = worst >= MCE_PANIC_SEVERITY;
        } else {
                /*
-                * Local MCE skipped calling mce_reign()
-                * If we found a fatal error, we need to panic here.
+                * If there was a fatal machine check we should have
+                * already called mce_panic earlier in this function.
+                * Since we re-read the banks, we might have found
+                * something new. Check again to see if we found a
+                * fatal error. We call "mce_severity()" again to
+                * make sure we have the right "msg".
                 */
-                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
-                       mce_panic("Machine check from unknown source",
-                               NULL, NULL);
+               if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+                       mce_severity(&m, cfg->tolerant, &msg, true);
+                       mce_panic("Local fatal machine check!", &m, msg);
+               }
        }
 
        /*
@@ -1307,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        if (worst > 0)
                mce_report_event(regs);
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
        sync_core();
 
        if (worst != MCE_AR_SEVERITY && !kill_it)
@@ -2153,9 +2177,6 @@ static ssize_t store_int_with_restart(struct device *s,
        if (check_interval == old_check_interval)
                return ret;
 
-       if (check_interval < 1)
-               check_interval = 1;
-
        mutex_lock(&mce_sysfs_mutex);
        mce_restart();
        mutex_unlock(&mce_sysfs_mutex);
index 1c2cfa0644aa979c97cc01a42925a44c25f9f852..97ccf4c3b45bec517605813b1f24518b10466002 100644 (file)
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
                        p = memdup_patch(data, size);
                        if (!p)
                                pr_err("Error allocating buffer %p\n", data);
-                       else
+                       else {
                                list_replace(&iter->plist, &p->plist);
+                               kfree(iter->data);
+                               kfree(iter);
+                       }
                }
        }
 
index 4021d3859499c77c14eaa1c40864c752547df68c..40eee6cc412484470daba013f2a197439163707a 100644 (file)
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 
        memset(line, 0, LINE_SIZE);
 
-       length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+       len = min_t(size_t, len, LINE_SIZE - 1);
+       length = strncpy_from_user(line, buf, len);
        if (length < 0)
                return length;
 
index 666a284116ac43e2633f69893db17c332e26d314..9c8652974f8ed1f6a8ed20f97ac843ec332706f2 100644 (file)
@@ -22,8 +22,6 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
-#define OPCODE_BUFSIZE 64
-
 int panic_on_unrecovered_nmi;
 int panic_on_io_nmi;
 static int die_counter;
@@ -93,26 +91,18 @@ static void printk_stack_address(unsigned long address, int reliable,
  */
 void show_opcodes(u8 *rip, const char *loglvl)
 {
-       unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
+#define PROLOGUE_SIZE 42
+#define EPILOGUE_SIZE 21
+#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
        u8 opcodes[OPCODE_BUFSIZE];
-       u8 *ip;
-       int i;
-
-       printk("%sCode: ", loglvl);
-
-       ip = (u8 *)rip - code_prologue;
-       if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
-               pr_cont("Bad RIP value.\n");
-               return;
-       }
 
-       for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
-               if (ip == rip)
-                       pr_cont("<%02x> ", opcodes[i]);
-               else
-                       pr_cont("%02x ", opcodes[i]);
+       if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+               printk("%sCode: Bad RIP value.\n", loglvl);
+       } else {
+               printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+                      __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+                      opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
        }
-       pr_cont("\n");
 }
 
 void show_ip(struct pt_regs *regs, const char *loglvl)
index d1f25c83144752272401afe8c8aec313d40298ae..c88c23c658c1e99faad3daa236448bc4208901d7 100644 (file)
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
 {
        int i;
        u64 end;
+       u64 addr = 0;
 
        /*
         * The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
                struct e820_entry *entry = &e820_table->entries[i];
 
                end = entry->addr + entry->size;
+               if (addr < entry->addr)
+                       memblock_reserve(addr, entry->addr - addr);
+               addr = end;
                if (end != (resource_size_t)end)
                        continue;
 
+               /*
+                * All !E820_TYPE_RAM ranges (including gap ranges) are put
+                * into memblock.reserved to make sure that struct pages in
+                * such regions are not left uninitialized after boot.
+                */
                if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
-                       continue;
-
-               memblock_add(entry->addr, entry->size);
+                       memblock_reserve(entry->addr, entry->size);
+               else
+                       memblock_add(entry->addr, entry->size);
        }
 
        /* Throw away partial pages: */
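
The e820 change walks the (already sorted) table with a running cursor so
that holes between ranges, as well as every non-RAM range, land in
memblock.reserved and their struct pages get initialized. A stripped-down
sketch of the cursor logic, with plain integers standing in for the e820
types:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t addr, size; int is_ram; };

    /* Walk sorted ranges; "reserve" every gap and every !RAM range. */
    static void walk(const struct range *r, int n)
    {
            uint64_t cursor = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (cursor < r[i].addr)
                            printf("reserve gap %#llx-%#llx\n",
                                   (unsigned long long)cursor,
                                   (unsigned long long)r[i].addr);
                    cursor = r[i].addr + r[i].size;
                    if (r[i].is_ram)
                            printf("add ram %#llx+%#llx\n",
                                   (unsigned long long)r[i].addr,
                                   (unsigned long long)r[i].size);
                    else
                            printf("reserve %#llx+%#llx\n",
                                   (unsigned long long)r[i].addr,
                                   (unsigned long long)r[i].size);
            }
    }
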
index a21d6ace648e3006045f5bd13578f3b29d4ea0bf..8047379e575ad39cb47cdbb055131e9bb094bb4d 100644 (file)
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 #ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
 unsigned int pgdir_shift __ro_after_init = 39;
 EXPORT_SYMBOL(pgdir_shift);
 unsigned int ptrs_per_p4d __ro_after_init = 1;
index abe6df15a8fbb798bf9cb8bfec5252494a943e63..30f9cb2c0b551100616cb2c8ff2a19d79abfd0ad 100644 (file)
@@ -512,11 +512,18 @@ ENTRY(initial_code)
 ENTRY(setup_once_ref)
        .long setup_once
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define        PGD_ALIGN       (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL      1024
+#else
+#define        PGD_ALIGN       (PAGE_SIZE)
+#define PTI_USER_PGD_FILL      0
+#endif
 /*
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-       .align PAGE_SIZE
+       .align PGD_ALIGN
 #ifdef CONFIG_X86_PAE
 .globl initial_pg_pmd
 initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
 initial_page_table:
        .fill 1024,4,0
 #endif
+       .align PGD_ALIGN
 initial_pg_fixmap:
        .fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
-       .fill 4096,1,0
 .globl swapper_pg_dir
+       .align PGD_ALIGN
 swapper_pg_dir:
        .fill 1024,4,0
+       .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+       .fill 4096,1,0
 EXPORT_SYMBOL(empty_zero_page)
 
 /*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
        /* Page-aligned for the benefit of paravirt? */
-       .align PAGE_SIZE
+       .align PGD_ALIGN
 ENTRY(initial_page_table)
        .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
 # if KPMDS == 3
index 8344dd2f310a4489da2327c0b360995c37623cce..15ebc2fc166e66c954106ce7c7db3d73d377adae 100644 (file)
@@ -235,7 +235,7 @@ ENTRY(secondary_startup_64)
         *              address given in m16:64.
         */
        pushq   $.Lafter_lret   # put return address on stack for unwinder
-       xorq    %rbp, %rbp      # clear frame pointer
+       xorl    %ebp, %ebp      # clear frame pointer
        movq    initial_code(%rip), %rax
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
index 8771766d46b6c3e74d914136578f0b8d6f64b5ad..34a5c171514870af79195679ad9bb90741a57992 100644 (file)
@@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
                set_dr_addr_mask(0, i);
 }
 
-/*
- * Check for virtual address in kernel space.
- */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+static int arch_bp_generic_len(int x86_len)
 {
-       unsigned int len;
-       unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       va = info->address;
-       len = bp->attr.bp_len;
-
-       /*
-        * We don't need to worry about va + len - 1 overflowing:
-        * we already require that va is aligned to a multiple of len.
-        */
-       return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+       switch (x86_len) {
+       case X86_BREAKPOINT_LEN_1:
+               return HW_BREAKPOINT_LEN_1;
+       case X86_BREAKPOINT_LEN_2:
+               return HW_BREAKPOINT_LEN_2;
+       case X86_BREAKPOINT_LEN_4:
+               return HW_BREAKPOINT_LEN_4;
+#ifdef CONFIG_X86_64
+       case X86_BREAKPOINT_LEN_8:
+               return HW_BREAKPOINT_LEN_8;
+#endif
+       default:
+               return -EINVAL;
+       }
 }
 
 int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
 {
+       int len;
+
        /* Type */
        switch (x86_type) {
        case X86_BREAKPOINT_EXECUTE:
@@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
        }
 
        /* Len */
-       switch (x86_len) {
-       case X86_BREAKPOINT_LEN_1:
-               *gen_len = HW_BREAKPOINT_LEN_1;
-               break;
-       case X86_BREAKPOINT_LEN_2:
-               *gen_len = HW_BREAKPOINT_LEN_2;
-               break;
-       case X86_BREAKPOINT_LEN_4:
-               *gen_len = HW_BREAKPOINT_LEN_4;
-               break;
-#ifdef CONFIG_X86_64
-       case X86_BREAKPOINT_LEN_8:
-               *gen_len = HW_BREAKPOINT_LEN_8;
-               break;
-#endif
-       default:
+       len = arch_bp_generic_len(x86_len);
+       if (len < 0)
                return -EINVAL;
-       }
+       *gen_len = len;
 
        return 0;
 }
 
-
-static int arch_build_bp_info(struct perf_event *bp)
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+       unsigned long va;
+       int len;
 
-       info->address = bp->attr.bp_addr;
+       va = hw->address;
+       len = arch_bp_generic_len(hw->len);
+       WARN_ON_ONCE(len < 0);
+
+       /*
+        * We don't need to worry about va + len - 1 overflowing:
+        * we already require that va is aligned to a multiple of len.
+        */
+       return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+}
+
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
+{
+       hw->address = attr->bp_addr;
+       hw->mask = 0;
 
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_W:
-               info->type = X86_BREAKPOINT_WRITE;
+               hw->type = X86_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
-               info->type = X86_BREAKPOINT_RW;
+               hw->type = X86_BREAKPOINT_RW;
                break;
        case HW_BREAKPOINT_X:
                /*
@@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * acceptable for kprobes.  On non-kprobes kernels, we don't
                 * allow kernel breakpoints at all.
                 */
-               if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+               if (attr->bp_addr >= TASK_SIZE_MAX) {
 #ifdef CONFIG_KPROBES
-                       if (within_kprobe_blacklist(bp->attr.bp_addr))
+                       if (within_kprobe_blacklist(attr->bp_addr))
                                return -EINVAL;
 #else
                        return -EINVAL;
 #endif
                }
 
-               info->type = X86_BREAKPOINT_EXECUTE;
+               hw->type = X86_BREAKPOINT_EXECUTE;
                /*
                 * x86 inst breakpoints need to have a specific undefined len.
                 * But we still need to check userspace is not trying to setup
                 * an unsupported length, to get a range breakpoint for example.
                 */
-               if (bp->attr.bp_len == sizeof(long)) {
-                       info->len = X86_BREAKPOINT_LEN_X;
+               if (attr->bp_len == sizeof(long)) {
+                       hw->len = X86_BREAKPOINT_LEN_X;
                        return 0;
                }
        default:
@@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp)
        }
 
        /* Len */
-       info->mask = 0;
-
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->len = X86_BREAKPOINT_LEN_1;
+               hw->len = X86_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->len = X86_BREAKPOINT_LEN_2;
+               hw->len = X86_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->len = X86_BREAKPOINT_LEN_4;
+               hw->len = X86_BREAKPOINT_LEN_4;
                break;
 #ifdef CONFIG_X86_64
        case HW_BREAKPOINT_LEN_8:
-               info->len = X86_BREAKPOINT_LEN_8;
+               hw->len = X86_BREAKPOINT_LEN_8;
                break;
 #endif
        default:
                /* AMD range breakpoint */
-               if (!is_power_of_2(bp->attr.bp_len))
+               if (!is_power_of_2(attr->bp_len))
                        return -EINVAL;
-               if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+               if (attr->bp_addr & (attr->bp_len - 1))
                        return -EINVAL;
 
                if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * breakpoints, then we'll have to check for kprobe-blacklisted
                 * addresses anywhere in the range.
                 */
-               info->mask = bp->attr.bp_len - 1;
-               info->len = X86_BREAKPOINT_LEN_1;
+               hw->mask = attr->bp_len - 1;
+               hw->len = X86_BREAKPOINT_LEN_1;
        }
 
        return 0;
@@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;
 
 
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
-       switch (info->len) {
+       switch (hw->len) {
        case X86_BREAKPOINT_LEN_1:
                align = 0;
-               if (info->mask)
-                       align = info->mask;
+               if (hw->mask)
+                       align = hw->mask;
                break;
        case X86_BREAKPOINT_LEN_2:
                align = 1;
@@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
-       if (info->address & align)
+       if (hw->address & align)
                return -EINVAL;
 
        return 0;
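
Throughout the refactor, hw_breakpoint_arch_parse() keeps the same invariant
as the old arch_validate_hwbkpt_settings(): a breakpoint of length len must
sit on a len-aligned address, which is also what makes an overflow of
va + len - 1 impossible. A minimal sketch of that check for the power-of-two
lengths:

    #include <stdbool.h>

    /* Reject misaligned breakpoint addresses; @len is 1, 2, 4 or 8,
     * so (len - 1) is the mask of low bits that must be clear. */
    static bool bp_settings_ok(unsigned long address, unsigned int len)
    {
            if (len != 1 && len != 2 && len != 4 && len != 8)
                    return false;
            return (address & (len - 1)) == 0;
    }
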
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644 (file)
index 0000000..ddeeaac
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+       pushf
+       pop %_ASM_AX
+       ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+       push %_ASM_ARG1
+       popf
+       ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
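
Implementing native_save_fl()/native_restore_fl() in pure assembly
guarantees that the compiler can never insert stack-protector or
instrumentation code between the pushf and the pop, which could clobber the
flags being saved. A hedged sketch of how C code might consume the helper
(X86_EFLAGS_IF is bit 9 of EFLAGS; irqs_enabled() here is illustrative, not
a kernel API):

    extern unsigned long native_save_fl(void);

    #define X86_EFLAGS_IF (1UL << 9)

    /* Nonzero if interrupts are currently enabled on this CPU. */
    static inline int irqs_enabled(void)
    {
            return !!(native_save_fl() & X86_EFLAGS_IF);
    }
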
index e56c95be2808af74b93c879a5df48201056329b2..eeea935e9bb53ff9f6cfb0b75778e71c4889af84 100644 (file)
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
        BUG();
 }
 
-static void __jump_label_transform(struct jump_entry *entry,
-                                  enum jump_label_type type,
-                                  void *(*poker)(void *, const void *, size_t),
-                                  int init)
+static void __ref __jump_label_transform(struct jump_entry *entry,
+                                        enum jump_label_type type,
+                                        void *(*poker)(void *, const void *, size_t),
+                                        int init)
 {
        union jump_code_union code;
        const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
        const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
+       if (early_boot_irqs_disabled)
+               poker = text_poke_early;
+
        if (type == JUMP_LABEL_JMP) {
                if (init) {
                        /*
index ae38dccf0c8f2cb76fc959999438c6051d3d87de..2b949f4fd4d86d02ac20199872a625c7cab965e4 100644 (file)
@@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
 }
 #endif
 
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                          struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                 struct kprobe_ctlblk *kcb)
-{
-       return 0;
-}
-#endif
 #endif
index 6f4d42377fe520c52a1c67ea0f25b27fc3eae2e1..b0d1e81c96bbe297c2b0d573673a40bfb9cf3a99 100644 (file)
@@ -66,8 +66,6 @@
 
 #include "common.h"
 
-void jprobe_return_end(void);
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
                          - (u8 *) real;
                if ((s64) (s32) newdisp != newdisp) {
                        pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-                       pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
-                               src, real, insn->displacement.value);
                        return 0;
                }
                disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                 * stepping.
                 */
                regs->ip = (unsigned long)p->ainsn.insn;
-               preempt_enable_no_resched();
                return;
        }
 #endif
@@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
                 * Raise a BUG or we'll continue in an endless reentering loop
                 * and eventually a stack overflow.
                 */
-               printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
-                      p->addr);
+               pr_err("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
        default:
@@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
 
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
-        * We don't want to be preempted for the entire
-        * duration of kprobe processing. We conditionally
-        * re-enable preemption at the end of this function,
-        * and also in reenter_kprobe() and setup_singlestep().
+        * We don't want to be preempted for the entire duration of kprobe
+        * processing. Since the int3 and debug traps disable IRQs and we
+        * clear IF while single-stepping, this code cannot be preempted.
         */
-       preempt_disable();
 
        kcb = get_kprobe_ctlblk();
        p = get_kprobe(addr);
@@ -690,13 +682,14 @@ int kprobe_int3_handler(struct pt_regs *regs)
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry
-                        * for jprobe processing, so get out doing nothing
-                        * more here.
+                        * pre-handler and it returned non-zero, the user
+                        * handler set up the registers to exit to another
+                        * instruction, so we must skip the single stepping.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
+                       else
+                               reset_current_kprobe();
                        return 1;
                }
        } else if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -710,18 +703,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
                 * the original instruction.
                 */
                regs->ip = (unsigned long)addr;
-               preempt_enable_no_resched();
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       if (!skip_singlestep(p, regs, kcb))
-                               setup_singlestep(p, regs, kcb, 0);
-                       return 1;
-               }
        } /* else: not a kprobe fault; let the kernel handle it */
 
-       preempt_enable_no_resched();
        return 0;
 }
 NOKPROBE_SYMBOL(kprobe_int3_handler);
@@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs)
        }
        reset_current_kprobe();
 out:
-       preempt_enable_no_resched();
-
        /*
         * If somebody else is single-stepping across a probe point, flags
         * will have TF set, in which case we continue the remaining processing
@@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
-               preempt_enable_no_resched();
        } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
                   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
                /*
@@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 }
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_sp = stack_addr(regs);
-       addr = (unsigned long)(kcb->jprobe_saved_sp);
-
-       /*
-        * As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
-        * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
-        * raw stack chunk with redzones:
-        */
-       __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-       regs->ip = (unsigned long)(jp->entry);
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of the
-        * function graph tracer to get messed up.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       /* Unpoison stack redzones in the frames we are going to jump over. */
-       kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
-       asm volatile (
-#ifdef CONFIG_X86_64
-                       "       xchg   %%rbx,%%rsp      \n"
-#else
-                       "       xchgl   %%ebx,%%esp     \n"
-#endif
-                       "       int3                    \n"
-                       "       .globl jprobe_return_end\n"
-                       "       jprobe_return_end:      \n"
-                       "       nop                     \n"::"b"
-                       (kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       u8 *addr = (u8 *) (regs->ip - 1);
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       void *saved_sp = kcb->jprobe_saved_sp;
-
-       if ((addr > (u8 *) jprobe_return) &&
-           (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != saved_sp) {
-                       struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-                       printk(KERN_ERR
-                              "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), saved_sp);
-                       printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
-                       show_regs(saved_regs);
-                       printk(KERN_ERR "Current registers\n");
-                       show_regs(regs);
-                       BUG();
-               }
-               /* It's OK to start function graph tracing again */
-               unpause_graph_tracing();
-               *regs = kcb->jprobe_saved_regs;
-               __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        bool is_in_entry_trampoline_section = false;
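
With the jprobe machinery removed, the pre-handler contract is simpler:
returning non-zero means the handler rewrote regs->ip and the single-step
over the original instruction must be skipped. A hedged kernel-style sketch
of such a diverting pre-handler (my_fixup_routine is a hypothetical jump
target, not a kernel symbol):

    #include <linux/kprobes.h>

    extern void my_fixup_routine(void);  /* hypothetical jump target */

    /* Divert execution instead of single-stepping the probed insn;
     * returning 1 makes kprobe_int3_handler() skip setup_singlestep(). */
    static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            regs->ip = (unsigned long)my_fixup_routine;
            return 1;
    }
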
index 8dc0161cec8f470f365220097f9b0f97a252c803..ef819e19650bc8bb4c2d35eb9e9bd763ec7c9f5d 100644 (file)
 
 #include "common.h"
 
-static nokprobe_inline
-void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                     struct kprobe_ctlblk *kcb, unsigned long orig_ip)
-{
-       /*
-        * Emulate singlestep (and also recover regs->ip)
-        * as if there is a 5byte nop
-        */
-       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-       if (orig_ip)
-               regs->ip = orig_ip;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                   struct kprobe_ctlblk *kcb)
-{
-       if (kprobe_ftrace(p)) {
-               __skip_singlestep(p, regs, kcb, 0);
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
 /* Ftrace callback handler for kprobes -- called with preemption disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct pt_regs *regs)
@@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
                regs->ip = ip + sizeof(kprobe_opcode_t);
 
-               /* To emulate trap based kprobes, preempt_disable here */
-               preempt_disable();
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
-                       __skip_singlestep(p, regs, kcb, orig_ip);
-                       preempt_enable_no_resched();
+                       /*
+                        * Emulate singlestep (and also recover regs->ip)
+                        * as if there were a 5-byte NOP
+                        */
+                       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+                       if (unlikely(p->post_handler)) {
+                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                               p->post_handler(p, regs, 0);
+                       }
+                       regs->ip = orig_ip;
                }
                /*
-                * If pre_handler returns !0, it sets regs->ip and
-                * resets current kprobe, and keep preempt count +1.
+                * If pre_handler returns !0, it changes regs->ip. We have to
+                * skip emulating post_handler.
                 */
+               __this_cpu_write(current_kprobe, NULL);
        }
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
index 203d398802a3cb4d5d8a0c9183c5f5de60b7f5d6..eaf02f2e73005731e28dc22bd5d91d10fd26d2aa 100644 (file)
@@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
-               preempt_enable_no_resched();
                return 1;
        }
        return 0;
index 5b2300b818af9333f8d57f6b082f426b8556b606..09aaabb2bbf156176e6e800a5b5f26e1acd31d4c 100644 (file)
@@ -45,7 +45,6 @@
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
-#include <asm/kvm_guest.h>
 
 static int kvmapf = 1;
 
@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-static int kvmclock_vsyscall = 1;
-static int __init parse_no_kvmclock_vsyscall(char *arg)
-{
-        kvmclock_vsyscall = 0;
-        return 0;
-}
-
-early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
@@ -154,7 +144,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 
        for (;;) {
                if (!n.halted)
-                       prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+                       prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;
 
@@ -188,7 +178,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swq_has_sleeper(&n->wq))
-               swake_up(&n->wq);
+               swake_up_one(&n->wq);
 }
 
 static void apf_task_wake_all(void)
@@ -560,9 +550,6 @@ static void __init kvm_guest_init(void)
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
-       if (kvmclock_vsyscall)
-               kvm_setup_vsyscall_timeinfo();
-
 #ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -628,6 +615,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .type                   = X86_HYPER_KVM,
+       .init.init_platform     = kvmclock_init,
        .init.guest_late_init   = kvm_guest_init,
        .init.x2apic_available  = kvm_para_available,
 };
index bf8d1eb7fca3d97976b7747f49a5e5d77d18edde..d2edd7e6c2947d8cec23ef138c4fe822ba8e08e9 100644 (file)
 #include <asm/apic.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/memblock.h>
+#include <linux/cpuhotplug.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
+#include <linux/mm.h>
 
+#include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
 #include <asm/kvmclock.h>
 
-static int kvmclock __ro_after_init = 1;
-static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static u64 kvm_sched_clock_offset;
+static int kvmclock __initdata = 1;
+static int kvmclock_vsyscall __initdata = 1;
+static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static u64 kvm_sched_clock_offset __ro_after_init;
 
-static int parse_no_kvmclock(char *arg)
+static int __init parse_no_kvmclock(char *arg)
 {
        kvmclock = 0;
        return 0;
 }
 early_param("no-kvmclock", parse_no_kvmclock);
 
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock *wall_clock;
+static int __init parse_no_kvmclock_vsyscall(char *arg)
+{
+       kvmclock_vsyscall = 0;
+       return 0;
+}
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+/* Aligned to page size to match what is mapped via vsyscalls to userspace */
+#define HV_CLOCK_SIZE  (sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+       (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
+
+static struct pvclock_vsyscall_time_info
+                       hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+       return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+       return this_cpu_read(hv_clock_per_cpu);
+}
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
@@ -55,21 +81,10 @@ static struct pvclock_wall_clock *wall_clock;
  */
 static void kvm_get_wallclock(struct timespec64 *now)
 {
-       struct pvclock_vcpu_time_info *vcpu_time;
-       int low, high;
-       int cpu;
-
-       low = (int)slow_virt_to_phys(wall_clock);
-       high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
-
-       native_write_msr(msr_kvm_wall_clock, low, high);
-
-       cpu = get_cpu();
-
-       vcpu_time = &hv_clock[cpu].pvti;
-       pvclock_read_wallclock(wall_clock, vcpu_time, now);
-
-       put_cpu();
+       wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+       preempt_disable();
+       pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+       preempt_enable();
 }
 
 static int kvm_set_wallclock(const struct timespec64 *now)
@@ -79,14 +94,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
 
 static u64 kvm_clock_read(void)
 {
-       struct pvclock_vcpu_time_info *src;
        u64 ret;
-       int cpu;
 
        preempt_disable_notrace();
-       cpu = smp_processor_id();
-       src = &hv_clock[cpu].pvti;
-       ret = pvclock_clocksource_read(src);
+       ret = pvclock_clocksource_read(this_cpu_pvti());
        preempt_enable_notrace();
        return ret;
 }
@@ -112,11 +123,11 @@ static inline void kvm_sched_clock_init(bool stable)
        kvm_sched_clock_offset = kvm_clock_read();
        pv_time_ops.sched_clock = kvm_sched_clock_read;
 
-       printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
-                       kvm_sched_clock_offset);
+       pr_info("kvm-clock: using sched offset of %llu cycles",
+               kvm_sched_clock_offset);
 
        BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
-                sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+               sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
 }
 
 /*
@@ -130,18 +141,11 @@ static inline void kvm_sched_clock_init(bool stable)
  */
 static unsigned long kvm_get_tsc_khz(void)
 {
-       struct pvclock_vcpu_time_info *src;
-       int cpu;
-       unsigned long tsc_khz;
-
-       cpu = get_cpu();
-       src = &hv_clock[cpu].pvti;
-       tsc_khz = pvclock_tsc_khz(src);
-       put_cpu();
-       return tsc_khz;
+       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+       return pvclock_tsc_khz(this_cpu_pvti());
 }
 
-static void kvm_get_preset_lpj(void)
+static void __init kvm_get_preset_lpj(void)
 {
        unsigned long khz;
        u64 lpj;
@@ -155,49 +159,40 @@ static void kvm_get_preset_lpj(void)
 
 bool kvm_check_and_clear_guest_paused(void)
 {
+       struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
        bool ret = false;
-       struct pvclock_vcpu_time_info *src;
-       int cpu = smp_processor_id();
 
-       if (!hv_clock)
+       if (!src)
                return ret;
 
-       src = &hv_clock[cpu].pvti;
-       if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
-               src->flags &= ~PVCLOCK_GUEST_STOPPED;
+       if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+               src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
                pvclock_touch_watchdogs();
                ret = true;
        }
-
        return ret;
 }
 
 struct clocksource kvm_clock = {
-       .name = "kvm-clock",
-       .read = kvm_clock_get_cycles,
-       .rating = 400,
-       .mask = CLOCKSOURCE_MASK(64),
-       .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+       .name   = "kvm-clock",
+       .read   = kvm_clock_get_cycles,
+       .rating = 400,
+       .mask   = CLOCKSOURCE_MASK(64),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 EXPORT_SYMBOL_GPL(kvm_clock);
 
-int kvm_register_clock(char *txt)
+static void kvm_register_clock(char *txt)
 {
-       int cpu = smp_processor_id();
-       int low, high, ret;
-       struct pvclock_vcpu_time_info *src;
-
-       if (!hv_clock)
-               return 0;
+       struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
+       u64 pa;
 
-       src = &hv_clock[cpu].pvti;
-       low = (int)slow_virt_to_phys(src) | 1;
-       high = ((u64)slow_virt_to_phys(src) >> 32);
-       ret = native_write_msr_safe(msr_kvm_system_time, low, high);
-       printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
-              cpu, high, low, txt);
+       if (!src)
+               return;
 
-       return ret;
+       pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
+       wrmsrl(msr_kvm_system_time, pa);
+       pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
 }
 
 static void kvm_save_sched_clock_state(void)
@@ -212,11 +207,7 @@ static void kvm_restore_sched_clock_state(void)
 #ifdef CONFIG_X86_LOCAL_APIC
 static void kvm_setup_secondary_clock(void)
 {
-       /*
-        * Now that the first cpu already had this clocksource initialized,
-        * we shouldn't fail.
-        */
-       WARN_ON(kvm_register_clock("secondary cpu clock"));
+       kvm_register_clock("secondary cpu clock");
 }
 #endif
 
@@ -244,98 +235,84 @@ static void kvm_shutdown(void)
        native_machine_shutdown();
 }
 
-static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
-                                            phys_addr_t align)
+static int __init kvm_setup_vsyscall_timeinfo(void)
 {
-       phys_addr_t mem;
+#ifdef CONFIG_X86_64
+       u8 flags;
 
-       mem = memblock_alloc(size, align);
-       if (!mem)
+       if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
                return 0;
 
-       if (sev_active()) {
-               if (early_set_memory_decrypted((unsigned long)__va(mem), size))
-                       goto e_free;
-       }
+       flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+       if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+               return 0;
 
-       return mem;
-e_free:
-       memblock_free(mem, size);
+       kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
        return 0;
 }
+early_initcall(kvm_setup_vsyscall_timeinfo);
 
-static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+static int kvmclock_setup_percpu(unsigned int cpu)
 {
-       if (sev_active())
-               early_set_memory_encrypted((unsigned long)__va(addr), size);
+       struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
+
+       /*
+        * The per-CPU area setup replicates the CPU0 data to all per-CPU
+        * pointers, so check carefully. CPU0 has already been set up in
+        * init.
+        */
+       if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+               return 0;
+
+       /* Use the static page for the first CPUs, allocate otherwise */
+       if (cpu < HVC_BOOT_ARRAY_SIZE)
+               p = &hv_clock_boot[cpu];
+       else
+               p = kzalloc(sizeof(*p), GFP_KERNEL);
 
-       memblock_free(addr, size);
+       per_cpu(hv_clock_per_cpu, cpu) = p;
+       return p ? 0 : -ENOMEM;
 }
 
 void __init kvmclock_init(void)
 {
-       struct pvclock_vcpu_time_info *vcpu_time;
-       unsigned long mem, mem_wall_clock;
-       int size, cpu, wall_clock_size;
        u8 flags;
 
-       size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
-       if (!kvm_para_available())
+       if (!kvm_para_available() || !kvmclock)
                return;
 
-       if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+       if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
                msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
                msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-       } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
-               return;
-
-       wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
-       mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
-       if (!mem_wall_clock)
-               return;
-
-       wall_clock = __va(mem_wall_clock);
-       memset(wall_clock, 0, wall_clock_size);
-
-       mem = kvm_memblock_alloc(size, PAGE_SIZE);
-       if (!mem) {
-               kvm_memblock_free(mem_wall_clock, wall_clock_size);
-               wall_clock = NULL;
+       } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
                return;
        }
 
-       hv_clock = __va(mem);
-       memset(hv_clock, 0, size);
-
-       if (kvm_register_clock("primary cpu clock")) {
-               hv_clock = NULL;
-               kvm_memblock_free(mem, size);
-               kvm_memblock_free(mem_wall_clock, wall_clock_size);
-               wall_clock = NULL;
+       if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
+                             kvmclock_setup_percpu, NULL) < 0) {
                return;
        }
 
-       printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+       pr_info("kvm-clock: Using msrs %x and %x",
                msr_kvm_system_time, msr_kvm_wall_clock);
 
+       this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
+       kvm_register_clock("primary cpu clock");
+       pvclock_set_pvti_cpu0_va(hv_clock_boot);
+
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
-       cpu = get_cpu();
-       vcpu_time = &hv_clock[cpu].pvti;
-       flags = pvclock_read_flags(vcpu_time);
-
+       flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
        kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
-       put_cpu();
 
        x86_platform.calibrate_tsc = kvm_get_tsc_khz;
        x86_platform.calibrate_cpu = kvm_get_tsc_khz;
        x86_platform.get_wallclock = kvm_get_wallclock;
        x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
-       x86_cpuinit.early_percpu_clock_init =
-               kvm_setup_secondary_clock;
+       x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
 #endif
        x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
        x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
@@ -347,34 +324,3 @@ void __init kvmclock_init(void)
        clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
        pv_info.name = "KVM";
 }
-
-int __init kvm_setup_vsyscall_timeinfo(void)
-{
-#ifdef CONFIG_X86_64
-       int cpu;
-       u8 flags;
-       struct pvclock_vcpu_time_info *vcpu_time;
-       unsigned int size;
-
-       if (!hv_clock)
-               return 0;
-
-       size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
-       cpu = get_cpu();
-
-       vcpu_time = &hv_clock[cpu].pvti;
-       flags = pvclock_read_flags(vcpu_time);
-
-       if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
-               put_cpu();
-               return 1;
-       }
-
-       pvclock_set_pvti_cpu0_va(hv_clock);
-       put_cpu();
-
-       kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
-#endif
-       return 0;
-}
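
The kvmclock rework replaces the early memblock allocation with one
page-aligned static array that covers the boot CPUs, falling back to dynamic
allocation only for CPUs that do not fit in that page. A simplified
userspace sketch of this hybrid policy (the array size is illustrative; the
real HVC_BOOT_ARRAY_SIZE is PAGE_SIZE divided by the pvti entry size):

    #include <stdlib.h>

    #define BOOT_ARRAY_SIZE 64              /* illustrative */

    struct pvti { unsigned long data[8]; }; /* illustrative layout */

    static struct pvti boot_array[BOOT_ARRAY_SIZE];

    /* Early CPUs use the static page; later CPUs allocate on demand.
     * May return NULL, just as kzalloc() can in the kernel. */
    static struct pvti *get_pvti(unsigned int cpu)
    {
            if (cpu < BOOT_ARRAY_SIZE)
                    return &boot_array[cpu];
            return calloc(1, sizeof(struct pvti));
    }
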
index c9b14020f4ddf433c5d96f6f4930a65340f214a2..733e6ace0fa4e97dcb77e031d37c89ec82866e0f 100644 (file)
@@ -100,6 +100,102 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
        return new_ldt;
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+static void do_sanity_check(struct mm_struct *mm,
+                           bool had_kernel_mapping,
+                           bool had_user_mapping)
+{
+       if (mm->context.ldt) {
+               /*
+                * We already had an LDT.  The top-level entry should already
+                * have been allocated and synchronized with the usermode
+                * tables.
+                */
+               WARN_ON(!had_kernel_mapping);
+               if (static_cpu_has(X86_FEATURE_PTI))
+                       WARN_ON(!had_user_mapping);
+       } else {
+               /*
+                * This is the first time we're mapping an LDT for this process.
+                * Sync the pgd to the usermode tables.
+                */
+               WARN_ON(had_kernel_mapping);
+               if (static_cpu_has(X86_FEATURE_PTI))
+                       WARN_ON(had_user_mapping);
+       }
+}
+
+#ifdef CONFIG_X86_PAE
+
+static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
+{
+       p4d_t *p4d;
+       pud_t *pud;
+
+       if (pgd->pgd == 0)
+               return NULL;
+
+       p4d = p4d_offset(pgd, va);
+       if (p4d_none(*p4d))
+               return NULL;
+
+       pud = pud_offset(p4d, va);
+       if (pud_none(*pud))
+               return NULL;
+
+       return pmd_offset(pud, va);
+}
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+       pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+       pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+       pmd_t *k_pmd, *u_pmd;
+
+       k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+       u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+
+       if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+               set_pmd(u_pmd, *k_pmd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+       pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+       pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+       bool had_kernel, had_user;
+       pmd_t *k_pmd, *u_pmd;
+
+       k_pmd      = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+       u_pmd      = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+       had_kernel = (k_pmd->pmd != 0);
+       had_user   = (u_pmd->pmd != 0);
+
+       do_sanity_check(mm, had_kernel, had_user);
+}
+
+#else /* !CONFIG_X86_PAE */
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+       pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+
+       if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+               set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+       pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+       bool had_kernel = (pgd->pgd != 0);
+       bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);
+
+       do_sanity_check(mm, had_kernel, had_user);
+}
+
+#endif /* CONFIG_X86_PAE */
+
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
@@ -115,9 +211,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       bool is_vmalloc, had_top_level_entry;
        unsigned long va;
+       bool is_vmalloc;
        spinlock_t *ptl;
        pgd_t *pgd;
        int i;
@@ -131,13 +226,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
         */
        WARN_ON(ldt->slot != -1);
 
+       /* Check if the current mappings are sane */
+       sanity_check_ldt_mapping(mm);
+
        /*
         * Did we already have the top level entry allocated?  We can't
         * use pgd_none() for this because it doesn't do anything on
         * 4-level page table kernels.
         */
        pgd = pgd_offset(mm, LDT_BASE_ADDR);
-       had_top_level_entry = (pgd->pgd != 0);
 
        is_vmalloc = is_vmalloc_addr(ldt->entries);
 
@@ -172,41 +269,31 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
                pte_unmap_unlock(ptep, ptl);
        }
 
-       if (mm->context.ldt) {
-               /*
-                * We already had an LDT.  The top-level entry should already
-                * have been allocated and synchronized with the usermode
-                * tables.
-                */
-               WARN_ON(!had_top_level_entry);
-               if (static_cpu_has(X86_FEATURE_PTI))
-                       WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
-       } else {
-               /*
-                * This is the first time we're mapping an LDT for this process.
-                * Sync the pgd to the usermode tables.
-                */
-               WARN_ON(had_top_level_entry);
-               if (static_cpu_has(X86_FEATURE_PTI)) {
-                       WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
-                       set_pgd(kernel_to_user_pgdp(pgd), *pgd);
-               }
-       }
+       /* Propagate LDT mapping to the user page-table */
+       map_ldt_struct_to_user(mm);
 
        va = (unsigned long)ldt_slot_va(slot);
        flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
 
        ldt->slot = slot;
-#endif
        return 0;
 }
 
+#else /* !CONFIG_PAGE_TABLE_ISOLATION */
+
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+       return 0;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
 static void free_ldt_pgtables(struct mm_struct *mm)
 {
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
        struct mmu_gather tlb;
        unsigned long start = LDT_BASE_ADDR;
-       unsigned long end = start + (1UL << PGDIR_SHIFT);
+       unsigned long end = LDT_END_ADDR;
 
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;
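
All of the LDT-mapping churn above exists because PTI gives every mm two
page-table roots, kept in adjacent pages, and kernel_to_user_pgdp() switches
between them. A hedged sketch of that pairing (the single-bit flip relies on
the kernel PGD being aligned to twice the page size; details simplified):

    #define PAGE_SHIFT 12

    /* With the kernel and user PGDs in adjacent, 2-page-aligned pages,
     * moving from the kernel half to the user half is one bit flip. */
    static unsigned long *kernel_to_user_pgdp_sketch(unsigned long *pgdp)
    {
            return (unsigned long *)((unsigned long)pgdp | (1UL << PAGE_SHIFT));
    }
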
index d1ab07ec8c9aca2090f42153efd1b6ad93d68ffe..5409c2800ab50bde0ee19b873a669250432dcc8d 100644 (file)
@@ -56,7 +56,7 @@ static void load_segments(void)
 
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
-       free_page((unsigned long)image->arch.pgd);
+       free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
        image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
        free_page((unsigned long)image->arch.pmd0);
@@ -72,7 +72,8 @@ static void machine_kexec_free_page_tables(struct kimage *image)
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
 {
-       image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+       image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                   PGD_ALLOCATION_ORDER);
 #ifdef CONFIG_X86_PAE
        image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
        image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
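
The kexec allocation switches from one page to PGD_ALLOCATION_ORDER pages
because, under PTI, a PGD allocation must cover both the kernel and the user
halves. The order is conventionally defined along these lines (a sketch
consistent with the pairing used above; verify against the tree in use):

    /* Two pages (order 1) when PTI needs a user-space PGD twin,
     * one page (order 0) otherwise. */
    #ifdef CONFIG_PAGE_TABLE_ISOLATION
    #define PGD_ALLOCATION_ORDER 1
    #else
    #define PGD_ALLOCATION_ORDER 0
    #endif
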
index 99dc79e76bdc5497c8e07c6ee32e74ffe04ff492..930c88341e4ec875013d99f8c844ecace51088fb 100644 (file)
@@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf,
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);
 
-       if (tgt_clobbers & ~site_clobbers)
-               return len;     /* target would clobber too much for this site */
-       if (len < 5)
+       if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+               WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
                return len;     /* call too long for patch site */
+       }
 
        b->opcode = 0xe8; /* call */
        b->delta = delta;
@@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);
 
-       if (len < 5)
+       if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+               WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
                return len;     /* call too long for patch site */
+       }
 
        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;
index 9edadabf04f66c657f8a29bb56fe994b2559d5cf..9cb98f7b07c9afb1a8118cbf84be125567481ea0 100644 (file)
@@ -20,7 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
index 4dfd90a75e63a13fe37469982d727347f9359e01..2e9006c1e240884b5f5fa28e0b4e442f39d412c4 100644 (file)
@@ -60,7 +60,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
                        printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
                               p->detect, q->detect);
                        /* Heavy-handed way... */
-                       x->depend = 0;
+                       x->depend = NULL;
                }
        }
 
index da5190a1ea168df7ab903985a2a4dadd2c0665b0..4a710ffffd9a4e5644b4f14683219809a3b57f33 100644 (file)
@@ -9,6 +9,6 @@ static __init int add_pcspkr(void)
 
        pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
 
-       return IS_ERR(pd) ? PTR_ERR(pd) : 0;
+       return PTR_ERR_OR_ZERO(pd);
 }
 device_initcall(add_pcspkr);
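
PTR_ERR_OR_ZERO() from linux/err.h collapses the common IS_ERR()/PTR_ERR()
dance into a single call; open-coded, it is equivalent to:

    #include <linux/err.h>

    static inline int ptr_err_or_zero(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }
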
index 30ca2d1a92319726ff31d3ddb8264140bcec17cf..c93fcfdf1673418a352c8eb1085504d8c4ee4ffd 100644 (file)
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
 
-#ifdef CONFIG_X86_64
                /*
                 * .sp1 is cpu_current_top_of_stack.  The init task never
                 * runs user code, but cpu_current_top_of_stack should still
                 * be well defined before the first context switch.
                 */
                .sp1 = TOP_OF_INIT_STACK,
-#endif
 
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
index 0ae659de21eb241313e667321cd47c75cb3a148a..2924fd447e617dd6b4a2aaf1741fefe31c94b676 100644 (file)
@@ -285,7 +285,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * current_thread_info().  Refresh the SYSENTER configuration in
         * case prev or next is vm86.
         */
-       update_sp0(next_p);
+       update_task_stack(next_p);
        refresh_sysenter_cs(next);
        this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
index 12bb445fb98d6618013be3b78a07aee02ac4d01a..476e3ddf88906d2df48bff128383306271bf14a6 100644 (file)
@@ -478,7 +478,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        /* Reload sp0. */
-       update_sp0(next_p);
+       update_task_stack(next_p);
 
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
index 697a4ce0430827c89be2cbd86caedfac97e884f7..736348ead4218a0007b715efbc1d56bd1bb73e65 100644 (file)
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 /* Skylake */
 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
 {
-       u32 capid0;
+       u32 capid0, capid5;
 
        pci_read_config_dword(pdev, 0x84, &capid0);
+       pci_read_config_dword(pdev, 0x98, &capid5);
 
-       if ((capid0 & 0xc0) == 0xc0)
+       /*
+        * CAPID0{7:6} indicates whether this is an advanced RAS SKU.
+        * CAPID5{8:5} indicates that various NVDIMM usage modes are
+        * enabled, so memory machine check recovery is also enabled.
+        */
+       if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);
+
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
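
The quirk now also reads CAPID5 at config offset 0x98 and treats any of bits
8:5 (mask 0x1e0) as evidence that an NVDIMM usage mode, and with it memory
machine-check recovery, is enabled. A small sketch of the bit test:

    #include <stdbool.h>
    #include <stdint.h>

    /* CAPID5 bits 8:5 advertise NVDIMM usage modes; 0x1e0 is the
     * mask covering exactly those four bits. */
    static bool nvdimm_mode_enabled(uint32_t capid5)
    {
            return (capid5 & 0x1e0) != 0;
    }
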
index 2f86d883dd9508992adc57fcfcf3e8056554ebde..5d32c55aeb8bcddf1d3a55b521502c0ef554ec69 100644 (file)
@@ -866,6 +866,8 @@ void __init setup_arch(char **cmdline_p)
 
        idt_setup_early_traps();
        early_cpu_init();
+       arch_init_ideal_nops();
+       jump_label_init();
        early_ioremap_init();
 
        setup_olpc_ofw_pgd();
@@ -1012,6 +1014,7 @@ void __init setup_arch(char **cmdline_p)
         */
        init_hypervisor_platform();
 
+       tsc_early_init();
        x86_init.resources.probe_roms();
 
        /* after parse_early_param, so could debug it */
@@ -1197,11 +1200,6 @@ void __init setup_arch(char **cmdline_p)
 
        memblock_find_dma_reserve();
 
-#ifdef CONFIG_KVM_GUEST
-       kvmclock_init();
-#endif
-
-       tsc_early_delay_calibrate();
        if (!early_xdbc_setup_hardware())
                early_xdbc_register_console();
 
@@ -1272,8 +1270,6 @@ void __init setup_arch(char **cmdline_p)
 
        mcheck_init();
 
-       arch_init_ideal_nops();
-
        register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
index 445ca11ff8634eb27fb93f93fe362fc4bffaf588..92a3b312a53c465bbde5f006b5707b62671a49ae 100644 (file)
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
        if (is_ia32_frame(ksig)) {
index c2f7d1d2a5c36fca041809b727bde36afb347c34..db9656e13ea0418dbf9e619f183bd2176e2d94d6 100644 (file)
@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
 #ifdef CONFIG_X86_32
        /* switch away from the initial page table */
        load_cr3(swapper_pg_dir);
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
        __flush_tlb_all();
 #endif
        load_current_idt();
index 093f2ea5dd56b613d03ccaeeb87fd50900f64ba1..7627455047c2d58e2db96cc6af1186debc2bdff1 100644 (file)
@@ -81,16 +81,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
 #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
 
-#define STACKTRACE_DUMP_ONCE(task) ({                          \
-       static bool __section(.data.unlikely) __dumped;         \
-                                                               \
-       if (!__dumped) {                                        \
-               __dumped = true;                                \
-               WARN_ON(1);                                     \
-               show_stack(task, NULL);                         \
-       }                                                       \
-})
-
 static int __always_inline
 __save_stack_trace_reliable(struct stack_trace *trace,
                            struct task_struct *task)
@@ -99,30 +89,25 @@ __save_stack_trace_reliable(struct stack_trace *trace,
        struct pt_regs *regs;
        unsigned long addr;
 
-       for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+       for (unwind_start(&state, task, NULL, NULL);
+            !unwind_done(&state) && !unwind_error(&state);
             unwind_next_frame(&state)) {
 
                regs = unwind_get_entry_regs(&state, NULL);
                if (regs) {
+                       /* Success path for user tasks */
+                       if (user_mode(regs))
+                               goto success;
+
                        /*
                         * Kernel mode registers on the stack indicate an
                         * in-kernel interrupt or exception (e.g., preemption
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
-                       if (!user_mode(regs))
-                               return -EINVAL;
 
-                       /*
-                        * The last frame contains the user mode syscall
-                        * pt_regs.  Skip it and finish the unwind.
-                        */
-                       unwind_next_frame(&state);
-                       if (!unwind_done(&state)) {
-                               STACKTRACE_DUMP_ONCE(task);
+                       if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                return -EINVAL;
-                       }
-                       break;
                }
 
                addr = unwind_get_return_address(&state);
@@ -132,21 +117,22 @@ __save_stack_trace_reliable(struct stack_trace *trace,
                 * generated code which __kernel_text_address() doesn't know
                 * about.
                 */
-               if (!addr) {
-                       STACKTRACE_DUMP_ONCE(task);
+               if (!addr)
                        return -EINVAL;
-               }
 
                if (save_stack_address(trace, addr, false))
                        return -EINVAL;
        }
 
        /* Check for stack corruption */
-       if (unwind_error(&state)) {
-               STACKTRACE_DUMP_ONCE(task);
+       if (unwind_error(&state))
+               return -EINVAL;
+
+       /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+       if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
                return -EINVAL;
-       }
 
+success:
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 
index a535dd64de6397b02b3f53cd685584ebf7ebf445..e6db475164edec4f33e6f056cde5cbdfbe51a556 100644 (file)
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
 
-       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
-               return;
        cond_local_irq_enable(regs);
 
        if (!user_mode(regs)) {
-               if (!fixup_exception(regs, trapnr)) {
-                       task->thread.error_code = error_code;
-                       task->thread.trap_nr = trapnr;
+               if (fixup_exception(regs, trapnr))
+                       return;
+
+               task->thread.error_code = error_code;
+               task->thread.trap_nr = trapnr;
+
+               if (notify_die(DIE_TRAP, str, regs, error_code,
+                                       trapnr, SIGFPE) != NOTIFY_STOP)
                        die(str, regs, error_code);
-               }
                return;
        }
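The reordering above means a kernel-mode FPU fault first tries fixup_exception(), and only if that fails records the trap details and gives the notifier chain a chance to veto the die(). A control-flow sketch with stand-in predicates (none of these are kernel functions):

/* Control-flow sketch of the reordered math_error() above;
 * the predicates are stand-ins, not kernel functions. */
#include <stdbool.h>
#include <stdio.h>

static bool try_fixup(void)      { return false; } /* no exception table hit */
static bool notifier_stops(void) { return false; } /* no NOTIFY_STOP */

static const char *math_error_kernel(void)
{
        if (try_fixup())
                return "fixed up, return";
        /* record error_code / trap_nr here, then: */
        if (!notifier_stops())
                return "die()";
        return "notifier swallowed it";
}

int main(void)
{
        puts(math_error_kernel());   /* "die()" with the stubs above */
        return 0;
}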
 
index 74392d9d51e0a799a7f4fcb974a55e7989ed400a..1463468ba9a0a5dc3914c92f88fffb491dc36d4f 100644 (file)
@@ -33,16 +33,13 @@ EXPORT_SYMBOL(cpu_khz);
 unsigned int __read_mostly tsc_khz;
 EXPORT_SYMBOL(tsc_khz);
 
+#define KHZ    1000
+
 /*
  * TSC can be unstable due to cpufreq or due to unsynced TSCs
  */
 static int __read_mostly tsc_unstable;
 
-/* native_sched_clock() is called before tsc_init(), so
-   we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
 static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
@@ -106,23 +103,6 @@ void cyc2ns_read_end(void)
  *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static void cyc2ns_data_init(struct cyc2ns_data *data)
-{
-       data->cyc2ns_mul = 0;
-       data->cyc2ns_shift = 0;
-       data->cyc2ns_offset = 0;
-}
-
-static void __init cyc2ns_init(int cpu)
-{
-       struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
-       cyc2ns_data_init(&c2n->data[0]);
-       cyc2ns_data_init(&c2n->data[1]);
-
-       seqcount_init(&c2n->seq);
-}
-
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
        struct cyc2ns_data data;
@@ -138,18 +118,11 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
        return ns;
 }
 
-static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 {
        unsigned long long ns_now;
        struct cyc2ns_data data;
        struct cyc2ns *c2n;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       sched_clock_idle_sleep_event();
-
-       if (!khz)
-               goto done;
 
        ns_now = cycles_2_ns(tsc_now);
 
@@ -181,12 +154,55 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
        c2n->data[0] = data;
        raw_write_seqcount_latch(&c2n->seq);
        c2n->data[1] = data;
+}
+
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       sched_clock_idle_sleep_event();
+
+       if (khz)
+               __set_cyc2ns_scale(khz, cpu, tsc_now);
 
-done:
        sched_clock_idle_wakeup_event();
        local_irq_restore(flags);
 }
 
+/*
+ * Initialize cyc2ns for boot cpu
+ */
+static void __init cyc2ns_init_boot_cpu(void)
+{
+       struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+       seqcount_init(&c2n->seq);
+       __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+}
+
+/*
+ * Secondary CPUs do not run through tsc_init(), so set up
+ * all the scale factors for all CPUs, assuming the same
+ * speed as the boot CPU. (cpufreq notifiers will fix this
+ * up if their speeds diverge.)
+ */
+static void __init cyc2ns_init_secondary_cpus(void)
+{
+       unsigned int cpu, this_cpu = smp_processor_id();
+       struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+       struct cyc2ns_data *data = c2n->data;
+
+       for_each_possible_cpu(cpu) {
+               if (cpu != this_cpu) {
+                       seqcount_init(&c2n->seq);
+                       c2n = per_cpu_ptr(&cyc2ns, cpu);
+                       c2n->data[0] = data[0];
+                       c2n->data[1] = data[1];
+               }
+       }
+}
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
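The per-CPU cyc2ns data implements the usual mult/shift conversion: ns = cycles * mul >> shift, with mul derived from the TSC frequency. A self-contained sketch of that arithmetic, assuming a GCC/Clang compiler with __uint128_t; the constants are illustrative, not the exact values clocks_calc_mult_shift() would pick:

/* Illustrative mult/shift cycles->ns conversion, mirroring the
 * cyc2ns math above. Constants are simplified examples. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t tsc_khz = 2400000;           /* 2.4 GHz TSC */
        uint32_t shift = 32;                  /* example shift */
        /* ns per cycle = 10^6 / khz; scale by 2^shift */
        uint64_t mul = ((uint64_t)1000000 << shift) / tsc_khz;

        uint64_t cycles = 4800000;            /* 2 ms worth of cycles */
        uint64_t ns = ((__uint128_t)cycles * mul) >> shift;

        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)cycles, (unsigned long long)ns);
        return 0;
}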
@@ -248,8 +264,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
 #ifdef CONFIG_X86_TSC
 int __init notsc_setup(char *str)
 {
-       pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
-       tsc_disabled = 1;
+       mark_tsc_unstable("boot parameter notsc");
        return 1;
 }
 #else
@@ -665,30 +680,17 @@ static unsigned long cpu_khz_from_cpuid(void)
        return eax_base_mhz * 1000;
 }
 
-/**
- * native_calibrate_cpu - calibrate the cpu on boot
+/*
+ * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods,
+ * which only become available later in boot, after ACPI is initialized.
  */
-unsigned long native_calibrate_cpu(void)
+static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
 {
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-       unsigned long flags, latch, ms, fast_calibrate;
+       unsigned long flags, latch, ms;
        int hpet = is_hpet_enabled(), i, loopmin;
 
-       fast_calibrate = cpu_khz_from_cpuid();
-       if (fast_calibrate)
-               return fast_calibrate;
-
-       fast_calibrate = cpu_khz_from_msr();
-       if (fast_calibrate)
-               return fast_calibrate;
-
-       local_irq_save(flags);
-       fast_calibrate = quick_pit_calibrate();
-       local_irq_restore(flags);
-       if (fast_calibrate)
-               return fast_calibrate;
-
        /*
         * Run 5 calibration loops to get the lowest frequency value
         * (the best estimate). We use two different calibration modes
@@ -831,6 +833,37 @@ unsigned long native_calibrate_cpu(void)
        return tsc_pit_min;
 }
 
+/**
+ * native_calibrate_cpu_early - try to calibrate the CPU early in boot
+ */
+unsigned long native_calibrate_cpu_early(void)
+{
+       unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
+
+       if (!fast_calibrate)
+               fast_calibrate = cpu_khz_from_msr();
+       if (!fast_calibrate) {
+               local_irq_save(flags);
+               fast_calibrate = quick_pit_calibrate();
+               local_irq_restore(flags);
+       }
+       return fast_calibrate;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu
+ */
+static unsigned long native_calibrate_cpu(void)
+{
+       unsigned long tsc_freq = native_calibrate_cpu_early();
+
+       if (!tsc_freq)
+               tsc_freq = pit_hpet_ptimer_calibrate_cpu();
+
+       return tsc_freq;
+}
+
 void recalibrate_cpu_khz(void)
 {
 #ifndef CONFIG_SMP
@@ -1307,7 +1340,7 @@ unreg:
 
 static int __init init_tsc_clocksource(void)
 {
-       if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+       if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
                return 0;
 
        if (tsc_unstable)
@@ -1341,40 +1374,22 @@ unreg:
  */
 device_initcall(init_tsc_clocksource);
 
-void __init tsc_early_delay_calibrate(void)
+static bool __init determine_cpu_tsc_frequencies(bool early)
 {
-       unsigned long lpj;
-
-       if (!boot_cpu_has(X86_FEATURE_TSC))
-               return;
-
-       cpu_khz = x86_platform.calibrate_cpu();
-       tsc_khz = x86_platform.calibrate_tsc();
-
-       tsc_khz = tsc_khz ? : cpu_khz;
-       if (!tsc_khz)
-               return;
-
-       lpj = tsc_khz * 1000;
-       do_div(lpj, HZ);
-       loops_per_jiffy = lpj;
-}
-
-void __init tsc_init(void)
-{
-       u64 lpj, cyc;
-       int cpu;
-
-       if (!boot_cpu_has(X86_FEATURE_TSC)) {
-               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
-               return;
+       /* Make sure that cpu and tsc are not already calibrated */
+       WARN_ON(cpu_khz || tsc_khz);
+
+       if (early) {
+               cpu_khz = x86_platform.calibrate_cpu();
+               tsc_khz = x86_platform.calibrate_tsc();
+       } else {
+               /* We should not be here with non-native cpu calibration */
+               WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
+               cpu_khz = pit_hpet_ptimer_calibrate_cpu();
        }
 
-       cpu_khz = x86_platform.calibrate_cpu();
-       tsc_khz = x86_platform.calibrate_tsc();
-
        /*
-        * Trust non-zero tsc_khz as authorative,
+        * Trust non-zero tsc_khz as authoritative,
         * and use it to sanity check cpu_khz,
         * which will be off if system timer is off.
         */
@@ -1383,52 +1398,78 @@ void __init tsc_init(void)
        else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
                cpu_khz = tsc_khz;
 
-       if (!tsc_khz) {
-               mark_tsc_unstable("could not calculate TSC khz");
-               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
-               return;
-       }
+       if (tsc_khz == 0)
+               return false;
 
        pr_info("Detected %lu.%03lu MHz processor\n",
-               (unsigned long)cpu_khz / 1000,
-               (unsigned long)cpu_khz % 1000);
+               (unsigned long)cpu_khz / KHZ,
+               (unsigned long)cpu_khz % KHZ);
 
        if (cpu_khz != tsc_khz) {
                pr_info("Detected %lu.%03lu MHz TSC",
-                       (unsigned long)tsc_khz / 1000,
-                       (unsigned long)tsc_khz % 1000);
+                       (unsigned long)tsc_khz / KHZ,
+                       (unsigned long)tsc_khz % KHZ);
        }
+       return true;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+       unsigned long lpj = tsc_khz * KHZ;
 
+       do_div(lpj, HZ);
+       return lpj;
+}
+
+static void __init tsc_enable_sched_clock(void)
+{
        /* Sanitize TSC ADJUST before cyc2ns gets initialized */
        tsc_store_and_check_tsc_adjust(true);
+       cyc2ns_init_boot_cpu();
+       static_branch_enable(&__use_tsc);
+}
+
+void __init tsc_early_init(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_TSC))
+               return;
+       if (!determine_cpu_tsc_frequencies(true))
+               return;
+       loops_per_jiffy = get_loops_per_jiffy();
 
+       tsc_enable_sched_clock();
+}
+
+void __init tsc_init(void)
+{
        /*
-        * Secondary CPUs do not run through tsc_init(), so set up
-        * all the scale factors for all CPUs, assuming the same
-        * speed as the bootup CPU. (cpufreq notifiers will fix this
-        * up if their speed diverges)
+        * native_calibrate_cpu_early can only calibrate using methods that are
+        * available early in boot.
         */
-       cyc = rdtsc();
-       for_each_possible_cpu(cpu) {
-               cyc2ns_init(cpu);
-               set_cyc2ns_scale(tsc_khz, cpu, cyc);
-       }
+       if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
+               x86_platform.calibrate_cpu = native_calibrate_cpu;
 
-       if (tsc_disabled > 0)
+       if (!boot_cpu_has(X86_FEATURE_TSC)) {
+               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
+       }
 
-       /* now allow native_sched_clock() to use rdtsc */
+       if (!tsc_khz) {
+               /* We failed to determine frequencies earlier, try again */
+               if (!determine_cpu_tsc_frequencies(false)) {
+                       mark_tsc_unstable("could not calculate TSC khz");
+                       setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+                       return;
+               }
+               tsc_enable_sched_clock();
+       }
 
-       tsc_disabled = 0;
-       static_branch_enable(&__use_tsc);
+       cyc2ns_init_secondary_cpus();
 
        if (!no_sched_irq_time)
                enable_sched_clock_irqtime();
 
-       lpj = ((u64)tsc_khz * 1000);
-       do_div(lpj, HZ);
-       lpj_fine = lpj;
-
+       lpj_fine = get_loops_per_jiffy();
        use_tsc_delay();
 
        check_system_tsc_reliable();
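get_loops_per_jiffy() above is just tsc_khz * 1000 / HZ. A quick worked example; HZ is configuration-dependent (250 is only a sample value) and the arithmetic assumes a 64-bit unsigned long:

/* loops_per_jiffy arithmetic as in get_loops_per_jiffy() above;
 * HZ=250 is just an example, assumes 64-bit unsigned long. */
#include <stdio.h>
#define HZ 250
int main(void)
{
        unsigned long tsc_khz = 2400000;          /* 2.4 GHz */
        unsigned long lpj = tsc_khz * 1000UL / HZ;
        printf("lpj = %lu\n", lpj);               /* 9600000 cycles per jiffy */
        return 0;
}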
@@ -1455,7 +1496,7 @@ unsigned long calibrate_delay_is_known(void)
        int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
        const struct cpumask *mask = topology_core_cpumask(cpu);
 
-       if (tsc_disabled || !constant_tsc || !mask)
+       if (!constant_tsc || !mask)
                return 0;
 
        sibling = cpumask_any_but(mask, cpu);
index 19afdbd7d0a77c8f5511cf2de616d24c5259dbae..27ef714d886c121caa786a85b6211e92a30d6915 100644 (file)
@@ -1,17 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * tsc_msr.c - TSC frequency enumeration via MSR
+ * TSC frequency enumeration via MSR
  *
- * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 2013, 2018 Intel Corporation
  * Author: Bin Gao <bin.gao@intel.com>
- *
- * This file is released under the GPLv2.
  */
 
 #include <linux/kernel.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
+
 #include <asm/apic.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
 #include <asm/param.h>
+#include <asm/tsc.h>
 
 #define MAX_NUM_FREQS  9
 
  * field msr_plat does.
  */
 struct freq_desc {
-       u8 x86_family;  /* CPU family */
-       u8 x86_model;   /* model */
        u8 msr_plat;    /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
        u32 freqs[MAX_NUM_FREQS];
 };
 
-static struct freq_desc freq_desc_tables[] = {
-       /* PNW */
-       { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
-       /* CLV+ */
-       { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
-       /* TNG - Intel Atom processor Z3400 series */
-       { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
-       /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
-       { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
-       /* ANN - Intel Atom processor Z3500 series */
-       { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
-       /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
-       { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
-                       80000, 93300, 90000, 88900, 87500 } },
+/*
+ * Penwell and Clovertrail use a spread-spectrum clock, so the
+ * actual frequencies differ slightly from those reported by
+ * the MSR according to the SDM.
+ */
+static const struct freq_desc freq_desc_pnw = {
+       0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
 };
 
-static int match_cpu(u8 family, u8 model)
-{
-       int i;
+static const struct freq_desc freq_desc_clv = {
+       0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
+};
 
-       for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
-               if ((family == freq_desc_tables[i].x86_family) &&
-                       (model == freq_desc_tables[i].x86_model))
-                       return i;
-       }
+static const struct freq_desc freq_desc_byt = {
+       1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
+};
 
-       return -1;
-}
+static const struct freq_desc freq_desc_cht = {
+       1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
+};
 
-/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
-#define id_to_freq(cpu_index, freq_id) \
-       (freq_desc_tables[cpu_index].freqs[freq_id])
+static const struct freq_desc freq_desc_tng = {
+       1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
+};
+
+static const struct freq_desc freq_desc_ann = {
+       1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
+};
+
+static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
+       INTEL_CPU_FAM6(ATOM_PENWELL,            freq_desc_pnw),
+       INTEL_CPU_FAM6(ATOM_CLOVERVIEW,         freq_desc_clv),
+       INTEL_CPU_FAM6(ATOM_SILVERMONT1,        freq_desc_byt),
+       INTEL_CPU_FAM6(ATOM_AIRMONT,            freq_desc_cht),
+       INTEL_CPU_FAM6(ATOM_MERRIFIELD,         freq_desc_tng),
+       INTEL_CPU_FAM6(ATOM_MOOREFIELD,         freq_desc_ann),
+       {}
+};
 
 /*
  * MSR-based CPU/TSC frequency discovery for certain CPUs.
@@ -70,18 +76,17 @@ static int match_cpu(u8 family, u8 model)
  */
 unsigned long cpu_khz_from_msr(void)
 {
-       u32 lo, hi, ratio, freq_id, freq;
+       u32 lo, hi, ratio, freq;
+       const struct freq_desc *freq_desc;
+       const struct x86_cpu_id *id;
        unsigned long res;
-       int cpu_index;
-
-       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
-               return 0;
 
-       cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
-       if (cpu_index < 0)
+       id = x86_match_cpu(tsc_msr_cpu_ids);
+       if (!id)
                return 0;
 
-       if (freq_desc_tables[cpu_index].msr_plat) {
+       freq_desc = (struct freq_desc *)id->driver_data;
+       if (freq_desc->msr_plat) {
                rdmsr(MSR_PLATFORM_INFO, lo, hi);
                ratio = (lo >> 8) & 0xff;
        } else {
@@ -91,8 +96,9 @@ unsigned long cpu_khz_from_msr(void)
 
        /* Get FSB FREQ ID */
        rdmsr(MSR_FSB_FREQ, lo, hi);
-       freq_id = lo & 0x7;
-       freq = id_to_freq(cpu_index, freq_id);
+
+       /* Map the CPU reference clock freq ID (0-7) to the clock freq (kHz) */
+       freq = freq_desc->freqs[lo & 0x7];
 
        /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
        res = freq * ratio;
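The rewritten lookup boils down to: match the CPU to a freq_desc, read a bus ratio from one of two MSRs, index the FSB frequency table with the low 3 bits of MSR_FSB_FREQ, and multiply. A user-space sketch of the indexing and multiply; the MSR values are hard-coded stand-ins, since rdmsr needs ring 0:

/* Sketch of the freq_desc lookup above; MSR reads are faked with
 * hard-coded values. */
#include <stdint.h>
#include <stdio.h>

struct freq_desc { uint8_t msr_plat; uint32_t freqs[9]; };

static const struct freq_desc freq_desc_byt = {
        1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0, 0 }
};

int main(void)
{
        uint32_t plat_info_lo = 0x1a00;         /* fake MSR_PLATFORM_INFO */
        uint32_t fsb_freq_lo  = 0x1;            /* fake MSR_FSB_FREQ */

        uint32_t ratio = (plat_info_lo >> 8) & 0xff;             /* 0x1a = 26 */
        uint32_t freq  = freq_desc_byt.freqs[fsb_freq_lo & 0x7]; /* 100000 kHz */

        unsigned long res = (unsigned long)freq * ratio;         /* TSC kHz */
        printf("TSC = %lu kHz (%lu MHz)\n", res, res / 1000);
        return 0;
}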
index feb28fee6cea7f9fbad1a4b06ee9932ac8690d75..26038eacf74a7130d659f98e5e1348667d4d635e 100644 (file)
@@ -198,7 +198,7 @@ static int orc_sort_cmp(const void *_a, const void *_b)
         * whitelisted .o files which didn't get objtool generation.
         */
        orc_a = cur_orc_table + (a - cur_orc_ip_table);
-       return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
+       return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
 }
 
 #ifdef CONFIG_MODULES
@@ -352,7 +352,7 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 
 bool unwind_next_frame(struct unwind_state *state)
 {
-       unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
+       unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
        bool indirect = false;
@@ -363,9 +363,9 @@ bool unwind_next_frame(struct unwind_state *state)
        /* Don't let modules unload while we're reading their ORC data. */
        preempt_disable();
 
-       /* Have we reached the end? */
+       /* End-of-stack check for user tasks: */
        if (state->regs && user_mode(state->regs))
-               goto done;
+               goto the_end;
 
        /*
         * Find the orc_entry associated with the text address.
@@ -374,9 +374,16 @@ bool unwind_next_frame(struct unwind_state *state)
         * calls and calls to noreturn functions.
         */
        orc = orc_find(state->signal ? state->ip : state->ip - 1);
-       if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
-               goto done;
-       orig_ip = state->ip;
+       if (!orc)
+               goto err;
+
+       /* End-of-stack check for kernel threads: */
+       if (orc->sp_reg == ORC_REG_UNDEFINED) {
+               if (!orc->end)
+                       goto err;
+
+               goto the_end;
+       }
 
        /* Find the previous frame's stack: */
        switch (orc->sp_reg) {
@@ -402,7 +409,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!state->regs || !state->full_regs) {
                        orc_warn("missing regs for base reg R10 at ip %pB\n",
                                 (void *)state->ip);
-                       goto done;
+                       goto err;
                }
                sp = state->regs->r10;
                break;
@@ -411,7 +418,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!state->regs || !state->full_regs) {
                        orc_warn("missing regs for base reg R13 at ip %pB\n",
                                 (void *)state->ip);
-                       goto done;
+                       goto err;
                }
                sp = state->regs->r13;
                break;
@@ -420,7 +427,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!state->regs || !state->full_regs) {
                        orc_warn("missing regs for base reg DI at ip %pB\n",
                                 (void *)state->ip);
-                       goto done;
+                       goto err;
                }
                sp = state->regs->di;
                break;
@@ -429,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!state->regs || !state->full_regs) {
                        orc_warn("missing regs for base reg DX at ip %pB\n",
                                 (void *)state->ip);
-                       goto done;
+                       goto err;
                }
                sp = state->regs->dx;
                break;
@@ -437,12 +444,12 @@ bool unwind_next_frame(struct unwind_state *state)
        default:
                orc_warn("unknown SP base reg %d for ip %pB\n",
                         orc->sp_reg, (void *)state->ip);
-               goto done;
+               goto err;
        }
 
        if (indirect) {
                if (!deref_stack_reg(state, sp, &sp))
-                       goto done;
+                       goto err;
        }
 
        /* Find IP, SP and possibly regs: */
@@ -451,7 +458,7 @@ bool unwind_next_frame(struct unwind_state *state)
                ip_p = sp - sizeof(long);
 
                if (!deref_stack_reg(state, ip_p, &state->ip))
-                       goto done;
+                       goto err;
 
                state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                                  state->ip, (void *)ip_p);
@@ -465,7 +472,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
-                       goto done;
+                       goto err;
                }
 
                state->regs = (struct pt_regs *)sp;
@@ -477,7 +484,7 @@ bool unwind_next_frame(struct unwind_state *state)
                if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
-                       goto done;
+                       goto err;
                }
 
                state->regs = (void *)sp - IRET_FRAME_OFFSET;
@@ -500,18 +507,18 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_REG_PREV_SP:
                if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
-                       goto done;
+                       goto err;
                break;
 
        case ORC_REG_BP:
                if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
-                       goto done;
+                       goto err;
                break;
 
        default:
                orc_warn("unknown BP base reg %d for ip %pB\n",
                         orc->bp_reg, (void *)orig_ip);
-               goto done;
+               goto err;
        }
 
        /* Prevent a recursive loop due to bad ORC data: */
@@ -520,13 +527,16 @@ bool unwind_next_frame(struct unwind_state *state)
            state->sp <= prev_sp) {
                orc_warn("stack going in the wrong direction? ip=%pB\n",
                         (void *)orig_ip);
-               goto done;
+               goto err;
        }
 
        preempt_enable();
        return true;
 
-done:
+err:
+       state->error = true;
+
+the_end:
        preempt_enable();
        state->stack_info.type = STACK_TYPE_UNKNOWN;
        return false;
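The point of the err/the_end split above is that callers can now distinguish "unwind hit corrupt state" from "unwind finished cleanly"; unwind_error() reads the new state->error flag. A toy model of that two-outcome termination, with hypothetical names:

/* Toy model of the err / the_end split above: both stop the walk,
 * but only err marks the state as unreliable. Hypothetical names. */
#include <stdbool.h>
#include <stdio.h>

struct state { bool done; bool error; };

static bool next_frame(struct state *s, int frame)
{
        if (frame < 0) {                /* corrupt data: like goto err */
                s->error = true;
                s->done = true;
                return false;
        }
        if (frame == 0) {               /* clean end: like goto the_end */
                s->done = true;
                return false;
        }
        return true;
}

int main(void)
{
        struct state s = { false, false };
        for (int f = 3; next_frame(&s, f); f--)
                ;
        printf("done=%d error=%d\n", s.done, s.error); /* done=1 error=0 */
        return 0;
}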
index 58d8d800875d0c6a3789a0406fec1eed366eecfc..deb576b23b7cf49817533d00555d0dc976c42486 100644 (file)
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
        insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
        /* has the side-effect of processing the entire instruction */
        insn_get_length(insn);
-       if (WARN_ON_ONCE(!insn_complete(insn)))
+       if (!insn_complete(insn))
                return -ENOEXEC;
 
        if (is_prefix_bad(insn))
index 9d0b5af7db915c60adf23389ac47c3312dd3683a..1c03e4aa6474eaec356dc3bffddace14cb838cd2 100644 (file)
@@ -149,7 +149,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
        preempt_disable();
        tsk->thread.sp0 = vm86->saved_sp0;
        tsk->thread.sysenter_cs = __KERNEL_CS;
-       update_sp0(tsk);
+       update_task_stack(tsk);
        refresh_sysenter_cs(&tsk->thread);
        vm86->saved_sp0 = 0;
        preempt_enable();
@@ -374,7 +374,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
                refresh_sysenter_cs(&tsk->thread);
        }
 
-       update_sp0(tsk);
+       update_task_stack(tsk);
        preempt_enable();
 
        if (vm86->flags & VM86_SCREEN_BITMAP)
index 5e1458f609a1ba84acfd0507dda1a98c4ae99a1b..8bde0a419f8689620db0a34cc9df0eaa135132c6 100644 (file)
@@ -55,19 +55,22 @@ jiffies_64 = jiffies;
  * so we can enable protection checks as well as retain 2MB large page
  * mappings for kernel text.
  */
-#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
+#define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
 
-#define X64_ALIGN_RODATA_END                                   \
+#define X86_ALIGN_RODATA_END                                   \
                . = ALIGN(HPAGE_SIZE);                          \
-               __end_rodata_hpage_align = .;
+               __end_rodata_hpage_align = .;                   \
+               __end_rodata_aligned = .;
 
 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
 #define ALIGN_ENTRY_TEXT_END   . = ALIGN(PMD_SIZE);
 
 #else
 
-#define X64_ALIGN_RODATA_BEGIN
-#define X64_ALIGN_RODATA_END
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END                                   \
+               . = ALIGN(PAGE_SIZE);                           \
+               __end_rodata_aligned = .;
 
 #define ALIGN_ENTRY_TEXT_BEGIN
 #define ALIGN_ENTRY_TEXT_END
@@ -141,9 +144,9 @@ SECTIONS
 
        /* .text should occupy whole number of pages */
        . = ALIGN(PAGE_SIZE);
-       X64_ALIGN_RODATA_BEGIN
+       X86_ALIGN_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
-       X64_ALIGN_RODATA_END
+       X86_ALIGN_RODATA_END
 
        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
index 3ab867603e81f39ba879a6bdfdefa95af052436f..2792b5573818fff859673e439a8e3be1c23b6054 100644 (file)
@@ -109,7 +109,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
 static void default_nmi_init(void) { };
 
 struct x86_platform_ops x86_platform __ro_after_init = {
-       .calibrate_cpu                  = native_calibrate_cpu,
+       .calibrate_cpu                  = native_calibrate_cpu_early,
        .calibrate_tsc                  = native_calibrate_tsc,
        .get_wallclock                  = mach_get_cmos_time,
        .set_wallclock                  = mach_set_rtc_mmss,
index 92fd433c50b9b5135e4ada92dc8968f4c5ed75d4..1bbec387d289cb785e4acbd28389e5e071fdfdbb 100644 (file)
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
        def_bool y
        bool "AMD Secure Encrypted Virtualization (SEV) support"
        depends on KVM_AMD && X86_64
-       depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+       depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
        ---help---
        Provides support for launching Encrypted VMs on AMD processors.
 
index b5cd8465d44f6cb99a9ae705cf2f44f3c310a1ac..d536d457517b9c5719c76b4354ee8feb09d1afc3 100644 (file)
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
         * using swait_active() is safe.
         */
        if (swait_active(q))
-               swake_up(q);
+               swake_up_one(q);
 
        if (apic_lvtt_tscdeadline(apic))
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
index d594690d8b9597a87f4cba26e8c1be5cb2de22de..6b8f11521c410be2ae902cc10fe3dc63095f822b 100644 (file)
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL);
+               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
index 559a12b6184de38c67ef4f2001963600f41f8753..5d8e317c2b04f44af66b0c1c8060912dee28a510 100644 (file)
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
                MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+                       CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
        return vmcs12->cpu_based_vm_exec_control & bit;
@@ -2560,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
+       unsigned long fs_base, kernel_gs_base;
 #endif
        int i;
 
@@ -2575,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
 
 #ifdef CONFIG_X86_64
-       save_fsgs_for_kvm();
-       vmx->host_state.fs_sel = current->thread.fsindex;
-       vmx->host_state.gs_sel = current->thread.gsindex;
-#else
-       savesegment(fs, vmx->host_state.fs_sel);
-       savesegment(gs, vmx->host_state.gs_sel);
+       if (likely(is_64bit_mm(current->mm))) {
+               save_fsgs_for_kvm();
+               vmx->host_state.fs_sel = current->thread.fsindex;
+               vmx->host_state.gs_sel = current->thread.gsindex;
+               fs_base = current->thread.fsbase;
+               kernel_gs_base = current->thread.gsbase;
+       } else {
+#endif
+               savesegment(fs, vmx->host_state.fs_sel);
+               savesegment(gs, vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+               fs_base = read_msr(MSR_FS_BASE);
+               kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+       }
 #endif
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2600,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        savesegment(ds, vmx->host_state.ds_sel);
        savesegment(es, vmx->host_state.es_sel);
 
-       vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
+       vmcs_writel(HOST_FS_BASE, fs_base);
        vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
 
-       vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+       vmx->msr_host_kernel_gs_base = kernel_gs_base;
        if (is_long_mode(&vmx->vcpu))
                wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
@@ -4311,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        vmcs_conf->order = get_order(vmcs_conf->size);
        vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
 
-       /* KVM supports Enlightened VMCS v1 only */
-       if (static_branch_unlikely(&enable_evmcs))
-               vmcs_conf->revision_id = KVM_EVMCS_VERSION;
-       else
-               vmcs_conf->revision_id = vmx_msr_low;
+       vmcs_conf->revision_id = vmx_msr_low;
 
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4385,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_config.size);
-       vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+       /* KVM supports Enlightened VMCS v1 only */
+       if (static_branch_unlikely(&enable_evmcs))
+               vmcs->revision_id = KVM_EVMCS_VERSION;
+       else
+               vmcs->revision_id = vmcs_config.revision_id;
+
        return vmcs;
 }
 
@@ -4553,6 +4575,19 @@ static __init int alloc_kvm_area(void)
                        return -ENOMEM;
                }
 
+               /*
+                * When eVMCS is enabled, alloc_vmcs_cpu() sets
+                * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+                * revision_id reported by MSR_IA32_VMX_BASIC.
+                *
+                * However, even though it is not explicitly documented
+                * by the TLFS, the VMXON region passed as the VMXON
+                * argument should still be marked with the revision_id
+                * reported by the physical CPU.
+                */
+               if (static_branch_unlikely(&enable_evmcs))
+                       vmcs->revision_id = vmcs_config.revision_id;
+
                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
@@ -7858,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
                     HRTIMER_MODE_REL_PINNED);
        vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
+       vmx->nested.vpid02 = allocate_vpid();
+
        vmx->nested.vmxon = true;
        return 0;
 
@@ -8445,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 /* Emulate the VMPTRST instruction */
 static int handle_vmptrst(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-       u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-       gva_t vmcs_gva;
+       unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+       u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+       gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
        struct x86_exception e;
+       gva_t gva;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (get_vmx_mem_address(vcpu, exit_qualification,
-                       vmx_instruction_info, true, &vmcs_gva))
+       if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
                return 1;
        /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
-       if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
-                                       (void *)&to_vmx(vcpu)->nested.current_vmptr,
-                                       sizeof(u64), &e)) {
+       if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+                                       sizeof(gpa_t), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -10335,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                        goto free_vmcs;
        }
 
-       if (nested) {
+       if (nested)
                nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
                                           kvm_vcpu_apicv_active(&vmx->vcpu));
-               vmx->nested.vpid02 = allocate_vpid();
-       }
 
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -10356,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        return &vmx->vcpu;
 
 free_vmcs:
-       free_vpid(vmx->nested.vpid02);
        free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
@@ -11620,6 +11653,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+       /*
+        * From the Intel SDM, volume 3:
+        * Fields relevant to VM-entry event injection must be set properly.
+        * These fields are the VM-entry interruption-information field, the
+        * VM-entry exception error code, and the VM-entry instruction length.
+        */
+       if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+               u32 intr_info = vmcs12->vm_entry_intr_info_field;
+               u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+               u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+               bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+               bool should_have_error_code;
+               bool urg = nested_cpu_has2(vmcs12,
+                                          SECONDARY_EXEC_UNRESTRICTED_GUEST);
+               bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+               /* VM-entry interruption-info field: interruption type */
+               if (intr_type == INTR_TYPE_RESERVED ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT &&
+                    !nested_cpu_supports_monitor_trap_flag(vcpu)))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: vector */
+               if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+                   (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: deliver error code */
+               should_have_error_code =
+                       intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+                       x86_exception_has_error_code(vector);
+               if (has_error_code != should_have_error_code)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry exception error code */
+               if (has_error_code &&
+                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: reserved bits */
+               if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry instruction length */
+               switch (intr_type) {
+               case INTR_TYPE_SOFT_EXCEPTION:
+               case INTR_TYPE_SOFT_INTR:
+               case INTR_TYPE_PRIV_SW_EXCEPTION:
+                       if ((vmcs12->vm_entry_instruction_len > 15) ||
+                           (vmcs12->vm_entry_instruction_len == 0 &&
+                            !nested_cpu_has_zero_length_injection(vcpu)))
+                               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+               }
+       }
+
        return 0;
 }
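The checks added above encode the Intel SDM volume 3 rules for VM-entry event injection. Here is a compact, self-contained version of the vector/error-code consistency part; the vector numbers are the architectural x86 ones, and the rest of the VMCS plumbing is omitted:

/* Standalone version of the "deliver error code" consistency check
 * above; vector numbers are the architectural x86 ones. */
#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
enum { DF_VECTOR = 8, TS_VECTOR = 10, NP_VECTOR = 11, SS_VECTOR = 12,
       GP_VECTOR = 13, PF_VECTOR = 14, AC_VECTOR = 17 };

static bool vector_has_error_code(unsigned int vector)
{
        static const unsigned int mask = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
                BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
                BIT(PF_VECTOR) | BIT(AC_VECTOR);
        return (1u << vector) & mask;
}

int main(void)
{
        /* Injecting #GP (13) must deliver an error code; #UD (6) must not. */
        printf("#GP: %d, #UD: %d\n",
               vector_has_error_code(13), vector_has_error_code(6));
        return 0;
}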
 
@@ -11686,7 +11775,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       u32 msr_entry_idx;
        u32 exit_qual;
        int r;
 
@@ -11708,10 +11796,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
        nested_get_vmcs12_pages(vcpu, vmcs12);
 
        r = EXIT_REASON_MSR_LOAD_FAIL;
-       msr_entry_idx = nested_vmx_load_msr(vcpu,
-                                           vmcs12->vm_entry_msr_load_addr,
-                                           vmcs12->vm_entry_msr_load_count);
-       if (msr_entry_idx)
+       exit_qual = nested_vmx_load_msr(vcpu,
+                                       vmcs12->vm_entry_msr_load_addr,
+                                       vmcs12->vm_entry_msr_load_count);
+       if (exit_qual)
                goto fail;
 
        /*
index 0046aa70205aa2dfbc0577065250be717ca25b4e..2b812b3c50881d2b42738792a7ef1a72cdcb9d66 100644 (file)
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {
 
        MSR_F10H_DECFG,
        MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
 };
 
 static unsigned int num_msr_based_features;
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
        switch (msr->index) {
        case MSR_IA32_UCODE_REV:
-               rdmsrl(msr->index, msr->data);
+       case MSR_IA32_ARCH_CAPABILITIES:
+               rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
                if (kvm_x86_ops->get_msr_feature(msr))
index 331993c49dae9bd852c759afecbb3c6c17477e15..257f27620bc272e3312295714a120de07963441f 100644 (file)
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+       static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+                       BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+                       BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+       return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
index 298ef1479240b6b899fb4185a5204d5ad56b8785..3b24dc05251c7ce908cc2be48befb971b5b8f564 100644 (file)
@@ -256,7 +256,7 @@ ENTRY(__memcpy_mcsafe)
 
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
-       xorq %rax, %rax
+       xorl %eax, %eax
        ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
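The xorq-to-xorl change relies on the x86-64 rule that any 32-bit operation zero-extends into the full 64-bit register, so "xorl %eax,%eax" clears all of RAX with a shorter encoding (no REX.W prefix). A small demonstration, assuming GCC or Clang on x86-64:

/* Demonstrates that xorl %eax,%eax clears the full 64-bit RAX;
 * requires GCC/Clang on x86-64. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t x = 0xdeadbeefcafef00dULL;

        /* 32-bit op: the upper 32 bits of the destination are zeroed too. */
        __asm__("movq %1, %%rax\n\t"
                "xorl %%eax, %%eax\n\t"
                "movq %%rax, %0"
                : "=r"(x) : "r"(x) : "rax");

        printf("%#llx\n", (unsigned long long)x);   /* prints 0 */
        return 0;
}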
index 2f3c9196b8345e58e2264c9664b87d2c8de62185..a12afff146d10d274792f396f7e0eeab3c9962f2 100644 (file)
@@ -111,6 +111,8 @@ static struct addr_marker address_markers[] = {
        [END_OF_SPACE_NR]       = { -1,                 NULL }
 };
 
+#define INIT_PGD       ((pgd_t *) &init_top_pgt)
+
 #else /* CONFIG_X86_64 */
 
 enum address_markers_idx {
@@ -120,6 +122,9 @@ enum address_markers_idx {
        VMALLOC_END_NR,
 #ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
+#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       LDT_NR,
 #endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
@@ -133,12 +138,17 @@ static struct addr_marker address_markers[] = {
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
 #ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
+#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       [LDT_NR]                = { 0UL,                "LDT remap" },
 #endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
 };
 
+#define INIT_PGD       (swapper_pg_dir)
+
 #endif /* !CONFIG_X86_64 */
 
 /* Multipliers for offsets within the PTEs */
@@ -496,11 +506,7 @@ static inline bool is_hypervisor_range(int idx)
 static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
 {
-#ifdef CONFIG_X86_64
-       pgd_t *start = (pgd_t *) &init_top_pgt;
-#else
-       pgd_t *start = swapper_pg_dir;
-#endif
+       pgd_t *start = INIT_PGD;
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};
@@ -563,12 +569,13 @@ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
 }
 EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
 
-static void ptdump_walk_user_pgd_level_checkwx(void)
+void ptdump_walk_user_pgd_level_checkwx(void)
 {
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pgd_t *pgd = (pgd_t *) &init_top_pgt;
+       pgd_t *pgd = INIT_PGD;
 
-       if (!static_cpu_has(X86_FEATURE_PTI))
+       if (!(__supported_pte_mask & _PAGE_NX) ||
+           !static_cpu_has(X86_FEATURE_PTI))
                return;
 
        pr_info("x86/mm: Checking user space page tables\n");
@@ -580,7 +587,6 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
 void ptdump_walk_pgd_level_checkwx(void)
 {
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
-       ptdump_walk_user_pgd_level_checkwx();
 }
 
 static int __init pt_dump_init(void)
@@ -609,6 +615,9 @@ static int __init pt_dump_init(void)
 # endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
+# ifdef CONFIG_MODIFY_LDT_SYSCALL
+       address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
+# endif
 #endif
        return 0;
 }
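The INIT_PGD macro above is the standard trick for hoisting an #ifdef out of several call sites: define the per-configuration value once next to each branch of the #ifdef, then use a single spelling everywhere. Minimal shape of that pattern (CONFIG_X86_64 here is just a stand-in macro, and the two pgd variables are dummies):

/* Shape of the INIT_PGD refactor above: pick the value once per
 * configuration, use a single name at every call site. */
#include <stdio.h>

#ifdef CONFIG_X86_64
static int init_top_pgt;
#define INIT_PGD (&init_top_pgt)
#else
static int swapper_pg_dir;
#define INIT_PGD (&swapper_pg_dir)
#endif

int main(void)
{
        int *start = INIT_PGD;   /* no #ifdef needed at the call site */
        printf("%p\n", (void *)start);
        return 0;
}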
index 9a84a0d08727b7452ebea1e2e35b5ad3eb0b6e79..db1c042e9853c142b9ebf7e67337ce875b20bb40 100644 (file)
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
-       WARN_ON_ONCE(in_nmi());
-
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
@@ -641,11 +639,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        return 0;
 }
 
-static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
-
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
@@ -664,20 +657,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
-                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
        }
 
-       printk(KERN_ALERT "BUG: unable to handle kernel ");
-       if (address < PAGE_SIZE)
-               printk(KERN_CONT "NULL pointer dereference");
-       else
-               printk(KERN_CONT "paging request");
-
-       printk(KERN_CONT " at %px\n", (void *) address);
+       pr_alert("BUG: unable to handle kernel %s at %px\n",
+                address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
+                (void *)address);
 
        dump_pagetable(address);
 }
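The printk()/KERN_CONT chain is replaced above by a single pr_alert() whose varying part is a ternary; continuation lines can be torn apart by concurrent printk callers, while one format string cannot. The same consolidation pattern in plain C:

/* Same consolidation as above: one format string with a ternary
 * instead of stitched continuation prints. */
#include <stdio.h>

static void show_fault(unsigned long address)
{
        printf("BUG: unable to handle kernel %s at %#lx\n",
               address < 4096 ? "NULL pointer dereference"
                              : "paging request",
               address);
}

int main(void)
{
        show_fault(0x10);        /* NULL pointer dereference */
        show_fault(0xffff8800);  /* paging request */
        return 0;
}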
index cee58a972cb20ff948e6384df5d9fc25f3e47ab7..74b157ac078de7ad0303cc9572185105750b1691 100644 (file)
@@ -773,13 +773,44 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
        }
 }
 
+/*
+ * begin/end can be in the direct map or the "high kernel mapping"
+ * used for the kernel image only.  free_init_pages() will do the
+ * right thing for either kind of address.
+ */
+void free_kernel_image_pages(void *begin, void *end)
+{
+       unsigned long begin_ul = (unsigned long)begin;
+       unsigned long end_ul = (unsigned long)end;
+       unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
+
+       free_init_pages("unused kernel image", begin_ul, end_ul);
+
+       /*
+        * PTI maps some of the kernel into userspace.  For performance,
+        * this includes some kernel areas that do not contain secrets.
+        * Those areas might be adjacent to the parts of the kernel image
+        * being freed, which may contain secrets.  Remove the "high kernel
+        * image mapping" for these freed areas, ensuring they are not even
+        * potentially vulnerable to Meltdown regardless of the specific
+        * optimizations PTI is currently using.
+        *
+        * The "noalias" prevents unmapping the direct map alias which is
+        * needed to access the freed pages.
+        *
+        * This is only valid for 64-bit kernels. 32-bit has only one
+        * mapping, which can't be treated in this way for obvious reasons.
+        */
+       if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
+               set_memory_np_noalias(begin_ul, len_pages);
+}
+
 void __ref free_initmem(void)
 {
        e820__reallocate_tables();
 
-       free_init_pages("unused kernel",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
+       free_kernel_image_pages(&__init_begin, &__init_end);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
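free_kernel_image_pages() converts a [begin, end) virtual range into a page count before unmapping the high kernel image alias. A sketch of that arithmetic, assuming PAGE_SHIFT of 12; set_memory_np_noalias() itself is kernel-only and stubbed out here:

/* Bytes-to-pages arithmetic as in free_kernel_image_pages() above;
 * PAGE_SHIFT=12 assumed, the unmap call is a stub. */
#include <stdio.h>

#define PAGE_SHIFT 12

static void set_memory_np_noalias(unsigned long long addr,
                                  unsigned long long pages)
{
        printf("unmap %llu page(s) at %#llx\n", pages, addr);
}

int main(void)
{
        unsigned long long begin = 0xffffffff82000000ULL;
        unsigned long long end   = 0xffffffff82200000ULL;   /* 2 MiB later */
        unsigned long long len_pages = (end - begin) >> PAGE_SHIFT;

        set_memory_np_noalias(begin, len_pages);             /* 512 pages */
        return 0;
}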
index 045f492d5f68260a581f44c210aa3753dc4bc225..dd519f3721692180b3aac7f5e8eeda52a1d68226 100644 (file)
@@ -1283,20 +1283,10 @@ void mark_rodata_ro(void)
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 
-       free_init_pages("unused kernel",
-                       (unsigned long) __va(__pa_symbol(text_end)),
-                       (unsigned long) __va(__pa_symbol(rodata_start)));
-       free_init_pages("unused kernel",
-                       (unsigned long) __va(__pa_symbol(rodata_end)),
-                       (unsigned long) __va(__pa_symbol(_sdata)));
+       free_kernel_image_pages((void *)text_end, (void *)rodata_start);
+       free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
 
        debug_checkwx();
-
-       /*
-        * Do this after all of the manipulation of the
-        * kernel text page tables are complete.
-        */
-       pti_clone_kernel_text();
 }
 
 int kern_addr_valid(unsigned long addr)
@@ -1350,16 +1340,28 @@ int kern_addr_valid(unsigned long addr)
 /* Amount of ram needed to start using large blocks */
 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
 
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+       unsigned long size = 1UL << order;
+
+       if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+               return -EINVAL;
+
+       set_memory_block_size = size;
+       return 0;
+}
+
 static unsigned long probe_memory_block_size(void)
 {
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;
 
-       /* If this is UV system, always set 2G block size */
-       if (is_uv_system()) {
-               bz = MAX_BLOCK_SIZE;
+       /* If memory block size has been set, then use it */
+       bz = set_memory_block_size;
+       if (bz)
                goto done;
-       }
 
        /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
index 34a2a3bfde9c144623f4bfafbeb32404e4e3a21d..b54d52a2d00a83a9fa4be8647fe3acf92b81c4c7 100644 (file)
@@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
        eb->nid = nid;
 
        if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-               emu_nid_to_phys[nid] = nid;
+               emu_nid_to_phys[nid] = pb->nid;
 
        pb->start += size;
        if (pb->start >= pb->end) {
@@ -198,40 +198,73 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
        return end;
 }
 
+static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
+{
+       unsigned long max_pfn = PHYS_PFN(max_addr);
+       unsigned long base_pfn = PHYS_PFN(base);
+       unsigned long hole_pfns = PHYS_PFN(hole);
+
+       return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
+}
+
 /*
  * Sets up fake nodes of `size' interleaved over physical nodes ranging from
  * `addr' to `max_addr'.
  *
  * Returns zero on success or negative on error.
  */
-static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
                                              struct numa_meminfo *pi,
-                                             u64 addr, u64 max_addr, u64 size)
+                                             u64 addr, u64 max_addr, u64 size,
+                                             int nr_nodes, struct numa_memblk *pblk,
+                                             int nid)
 {
        nodemask_t physnode_mask = numa_nodes_parsed;
+       int i, ret, uniform = 0;
        u64 min_size;
-       int nid = 0;
-       int i, ret;
 
-       if (!size)
+       if ((!size && !nr_nodes) || (nr_nodes && !pblk))
                return -1;
+
        /*
-        * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
-        * increased accordingly if the requested size is too small.  This
-        * creates a uniform distribution of node sizes across the entire
-        * machine (but not necessarily over physical nodes).
+        * In the 'uniform' case, split the passed-in physical node into
+        * nr_nodes pieces; in the non-uniform case, ignore the passed-in
+        * physical block and try to create nodes of at least size
+        * @size.
+        *
+        * In the uniform case, split the nodes strictly by physical
+        * capacity, i.e. ignore holes. In the non-uniform case, account
+        * for holes and treat @size as a minimum floor.
         */
-       min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
-       min_size = max(min_size, FAKE_NODE_MIN_SIZE);
-       if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
-               min_size = (min_size + FAKE_NODE_MIN_SIZE) &
-                                               FAKE_NODE_MIN_HASH_MASK;
+       if (!nr_nodes)
+               nr_nodes = MAX_NUMNODES;
+       else {
+               nodes_clear(physnode_mask);
+               node_set(pblk->nid, physnode_mask);
+               uniform = 1;
+       }
+
+       if (uniform) {
+               min_size = uniform_size(max_addr, addr, 0, nr_nodes);
+               size = min_size;
+       } else {
+               /*
+                * The limit on emulated nodes is MAX_NUMNODES, so the
+                * size per node is increased accordingly if the
+                * requested size is too small.  This creates a uniform
+                * distribution of node sizes across the entire machine
+                * (but not necessarily over physical nodes).
+                */
+               min_size = uniform_size(max_addr, addr,
+                               mem_hole_size(addr, max_addr), nr_nodes);
+       }
+       min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
-       size &= FAKE_NODE_MIN_HASH_MASK;
+       size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
 
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
@@ -248,10 +281,14 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                                node_clear(i, physnode_mask);
                                continue;
                        }
+
                        start = pi->blk[phys_blk].start;
                        limit = pi->blk[phys_blk].end;
 
-                       end = find_end_of_node(start, limit, size);
+                       if (uniform)
+                               end = start + size;
+                       else
+                               end = find_end_of_node(start, limit, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
@@ -266,7 +303,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
-                       if (limit - end - mem_hole_size(end, limit) < size)
+                       if ((limit - end - mem_hole_size(end, limit) < size)
+                                       && !uniform)
                                end = limit;
 
                        ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -276,7 +314,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                                return ret;
                }
        }
-       return 0;
+       return nid;
+}
+
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+                                             struct numa_meminfo *pi,
+                                             u64 addr, u64 max_addr, u64 size)
+{
+       return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+                       0, NULL, NUMA_NO_NODE);
 }
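The original entry point survives as a thin wrapper: passing nr_nodes = 0, a
NULL physical block and NUMA_NO_NODE selects the legacy non-uniform
behaviour, so existing numa=fake=<size> users are unaffected while the
uniform path gets its own, stricter semantics.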
 
 int __init setup_emu2phys_nid(int *dfl_phys_nid)
@@ -346,7 +392,28 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
-       if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+       if (strchr(emu_cmdline, 'U')) {
+               nodemask_t physnode_mask = numa_nodes_parsed;
+               unsigned long n;
+               int nid = 0;
+
+               n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
+               ret = -1;
+               for_each_node_mask(i, physnode_mask) {
+                       ret = split_nodes_size_interleave_uniform(&ei, &pi,
+                                       pi.blk[i].start, pi.blk[i].end, 0,
+                                       n, &pi.blk[i], nid);
+                       if (ret < 0)
+                               break;
+                       if (ret < n) {
+                               pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
+                                               __func__, i, ret, n);
+                               ret = -1;
+                               break;
+                       }
+                       nid = ret;
+               }
+       } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
                u64 size;
 
                size = memparse(emu_cmdline, &emu_cmdline);
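As a usage example (hypothetical node counts): booting with numa=fake=2U on
a machine with two parsed physical nodes requests two uniform emulated nodes
per physical node, four in total. The loop above fails the whole emulation
attempt (ret = -1) if any physical node yields fewer than the requested
count, rather than silently continuing with a lopsided layout.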
index 3bded76e8d5c5676bd972b0217e96fb84753ff7f..0a74996a114933cc96d445685d72448b71c45343 100644 (file)
@@ -53,6 +53,7 @@ static DEFINE_SPINLOCK(cpa_lock);
 #define CPA_FLUSHTLB 1
 #define CPA_ARRAY 2
 #define CPA_PAGES_ARRAY 4
+#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
 
 #ifdef CONFIG_PROC_FS
 static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -1486,6 +1487,9 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+       /* Has caller explicitly disabled alias checking? */
+       if (in_flag & CPA_NO_CHECK_ALIAS)
+               checkalias = 0;
 
        ret = __change_page_attr_set_clr(&cpa, checkalias);
 
@@ -1772,6 +1776,15 @@ int set_memory_np(unsigned long addr, int numpages)
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
 }
 
+int set_memory_np_noalias(unsigned long addr, int numpages)
+{
+       int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+       return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+                                       __pgprot(_PAGE_PRESENT), 0,
+                                       cpa_flags, NULL);
+}
+
 int set_memory_4k(unsigned long addr, int numpages)
 {
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
@@ -1784,6 +1797,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
                                      __pgprot(_PAGE_GLOBAL), 0);
 }
 
+int set_memory_global(unsigned long addr, int numpages)
+{
+       return change_page_attr_set(&addr, numpages,
+                                   __pgprot(_PAGE_GLOBAL), 0);
+}
+
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
        struct cpa_data cpa;
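The CPA flags above form a bitmask (1, 2, 4, 8), so a caller opts out of
alias checking by ORing CPA_NO_CHECK_ALIAS into the flags passed down to
change_page_attr_set_clr(), which tests it with in_flag &
CPA_NO_CHECK_ALIAS. A hedged sketch of a caller (the function name and the
range are hypothetical, not part of this diff):

	/*
	 * Illustration only: mark a range not-present in this mapping
	 * while deliberately leaving any alias mapping (e.g. the kernel
	 * highmap) untouched, via the helper added above.
	 */
	static void example_unmap_range_noalias(unsigned long begin,
						unsigned long end)
	{
		set_memory_np_noalias(begin, (end - begin) >> PAGE_SHIFT);
	}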
index 47b5951e592bec3edcea33e662c2965ec80e3572..3ef095c70ae31636fbecd185eac096ef5858d08b 100644 (file)
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
 
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS  (static_cpu_has(X86_FEATURE_PTI) ? \
+                                       KERNEL_PGD_PTRS : 0)
+
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,14 +210,14 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS      0
-
+#define PREALLOCATED_USER_PMDS  0
 #endif /* CONFIG_X86_PAE */
 
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
        int i;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++)
+       for (i = 0; i < count; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
@@ -217,7 +225,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
                }
 }
 
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
        int i;
        bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++) {
+       for (i = 0; i < count; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
                if (!pmd)
                        failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
        }
 
        if (failed) {
-               free_pmds(mm, pmds);
+               free_pmds(mm, pmds, count);
                return -ENOMEM;
        }
 
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+       pgd_t pgd = *pgdp;
+
+       if (pgd_val(pgd) != 0) {
+               pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+               *pgdp = native_make_pgd(0);
+
+               paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+               pmd_free(mm, pmd);
+               mm_dec_nr_pmds(mm);
+       }
+}
+
 static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
        int i;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++) {
-               pgd_t pgd = pgdp[i];
+       for (i = 0; i < PREALLOCATED_PMDS; i++)
+               mop_up_one_pmd(mm, &pgdp[i]);
 
-               if (pgd_val(pgd) != 0) {
-                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
 
-                       pgdp[i] = native_make_pgd(0);
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
 
-                       paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-                       pmd_free(mm, pmd);
-                       mm_dec_nr_pmds(mm);
-               }
-       }
+       pgdp = kernel_to_user_pgdp(pgdp);
+
+       for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+               mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
 }
 
 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
        }
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+                                    pgd_t *k_pgd, pmd_t *pmds[])
+{
+       pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+       pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+       p4d_t *u_p4d;
+       pud_t *u_pud;
+       int i;
+
+       u_p4d = p4d_offset(u_pgd, 0);
+       u_pud = pud_offset(u_p4d, 0);
+
+       s_pgd += KERNEL_PGD_BOUNDARY;
+       u_pud += KERNEL_PGD_BOUNDARY;
+
+       for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+               pmd_t *pmd = pmds[i];
+
+               memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+                      sizeof(pmd_t) * PTRS_PER_PMD);
+
+               pud_populate(mm, u_pud, pmd);
+       }
+
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+                                    pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
 /*
  * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
  * assumes that pgd should be in one page.
@@ -329,9 +384,6 @@ static int __init pgd_cache_init(void)
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
-       if (!pgd_cache)
-               return -ENOMEM;
-
        return 0;
 }
 core_initcall(pgd_cache_init);
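Dropping the NULL check here is safe: the cache is created with SLAB_PANIC,
so kmem_cache_create() panics on failure instead of returning NULL, which
made the removed -ENOMEM path dead code.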
@@ -343,7 +395,8 @@ static inline pgd_t *_pgd_alloc(void)
         * We allocate one page for pgd.
         */
        if (!SHARED_KERNEL_PMD)
-               return (pgd_t *)__get_free_page(PGALLOC_GFP);
+               return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+                                                PGD_ALLOCATION_ORDER);
 
        /*
         * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +408,7 @@ static inline pgd_t *_pgd_alloc(void)
 static inline void _pgd_free(pgd_t *pgd)
 {
        if (!SHARED_KERNEL_PMD)
-               free_page((unsigned long)pgd);
+               free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
 }
@@ -375,6 +428,7 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
+       pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
        pmd_t *pmds[PREALLOCATED_PMDS];
 
        pgd = _pgd_alloc();
@@ -384,12 +438,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
        mm->pgd = pgd;
 
-       if (preallocate_pmds(mm, pmds) != 0)
+       if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
                goto out_free_pgd;
 
-       if (paravirt_pgd_alloc(mm) != 0)
+       if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
                goto out_free_pmds;
 
+       if (paravirt_pgd_alloc(mm) != 0)
+               goto out_free_user_pmds;
+
        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
@@ -399,13 +456,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);
+       pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
 
        spin_unlock(&pgd_lock);
 
        return pgd;
 
+out_free_user_pmds:
+       free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
 out_free_pmds:
-       free_pmds(mm, pmds);
+       free_pmds(mm, pmds, PREALLOCATED_PMDS);
 out_free_pgd:
        _pgd_free(pgd);
 out:
@@ -719,28 +779,50 @@ int pmd_clear_huge(pmd_t *pmd)
        return 0;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
  * @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
  *
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
  */
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-       pmd_t *pmd;
+       pmd_t *pmd, *pmd_sv;
+       pte_t *pte;
        int i;
 
        if (pud_none(*pud))
                return 1;
 
        pmd = (pmd_t *)pud_page_vaddr(*pud);
+       pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+       if (!pmd_sv)
+               return 0;
 
-       for (i = 0; i < PTRS_PER_PMD; i++)
-               if (!pmd_free_pte_page(&pmd[i]))
-                       return 0;
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               pmd_sv[i] = pmd[i];
+               if (!pmd_none(pmd[i]))
+                       pmd_clear(&pmd[i]);
+       }
 
        pud_clear(pud);
+
+       /* INVLPG to clear all paging-structure caches */
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               if (!pmd_none(pmd_sv[i])) {
+                       pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+                       free_page((unsigned long)pte);
+               }
+       }
+
+       free_page((unsigned long)pmd_sv);
        free_page((unsigned long)pmd);
 
        return 1;
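The ordering in the rewritten pud_free_pmd_page() is the point of the
change: the PMD entries are saved into pmd_sv and cleared, the PUD entry is
cleared, and only then is the TLB flushed; the PTE pages and the save page
are freed strictly after the flush, so no CPU can repopulate its
paging-structure caches from a page that has already been returned to the
allocator. The up-front __get_free_page() for the save area is also why the
kerneldoc above now warns that callers must allow a single page allocation.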
@@ -749,11 +831,12 @@ int pud_free_pmd_page(pud_t *pud)
 /**
  * pmd_free_pte_page - Clear pmd entry and free pte page.
  * @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
  *
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
  */
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
        pte_t *pte;
 
@@ -762,8 +845,30 @@ int pmd_free_pte_page(pmd_t *pmd)
 
        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);
+
+       /* INVLPG to clear all paging-structure caches */
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
        free_page((unsigned long)pte);
 
        return 1;
 }
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+       return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This ensures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+       return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
index 4d418e70587802b05aff3ae49e80db7faf47a64c..d58b4aba9510f3d2a5f89f2df942d966b35cbe19 100644 (file)
 #define __GFP_NOTRACK  0
 #endif
 
+/*
+ * Define the page-table levels we clone for user-space on 32
+ * and 64 bit.
+ */
+#ifdef CONFIG_X86_64
+#define        PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PMD
+#else
+#define        PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PTE
+#endif
+
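The split follows how the kernel image is mapped: on 64-bit the image is
mapped with 2 MB pages, so cloning whole PMDs is sufficient and cheap; on
32-bit the image area can share PMDs with other data, so the clone has to
descend to the PTE level to avoid mapping more into the user page-table
than intended.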
 static void __init pti_print_if_insecure(const char *reason)
 {
        if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
@@ -117,7 +127,7 @@ enable:
        setup_force_cpu_cap(X86_FEATURE_PTI);
 }
 
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 {
        /*
         * Changes to the high (kernel) portion of the kernelmode page
@@ -176,7 +186,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 
        if (pgd_none(*pgd)) {
                unsigned long new_p4d_page = __get_free_page(gfp);
-               if (!new_p4d_page)
+               if (WARN_ON_ONCE(!new_p4d_page))
                        return NULL;
 
                set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
@@ -195,13 +205,17 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 {
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
-       p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+       p4d_t *p4d;
        pud_t *pud;
 
+       p4d = pti_user_pagetable_walk_p4d(address);
+       if (!p4d)
+               return NULL;
+
        BUILD_BUG_ON(p4d_large(*p4d) != 0);
        if (p4d_none(*p4d)) {
                unsigned long new_pud_page = __get_free_page(gfp);
-               if (!new_pud_page)
+               if (WARN_ON_ONCE(!new_pud_page))
                        return NULL;
 
                set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
@@ -215,7 +229,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
        }
        if (pud_none(*pud)) {
                unsigned long new_pmd_page = __get_free_page(gfp);
-               if (!new_pmd_page)
+               if (WARN_ON_ONCE(!new_pmd_page))
                        return NULL;
 
                set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
@@ -224,7 +238,6 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
        return pmd_offset(pud, address);
 }
 
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
 /*
  * Walk the shadow copy of the page tables (optionally) trying to allocate
  * page table pages on the way down.  Does not support large pages.
@@ -237,9 +250,13 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
-       pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+       pmd_t *pmd;
        pte_t *pte;
 
+       pmd = pti_user_pagetable_walk_pmd(address);
+       if (!pmd)
+               return NULL;
+
        /* We can't do anything sensible if we hit a large mapping. */
        if (pmd_large(*pmd)) {
                WARN_ON(1);
@@ -262,6 +279,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
        return pte;
 }
 
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
 static void __init pti_setup_vsyscall(void)
 {
        pte_t *pte, *target_pte;
@@ -282,8 +300,14 @@ static void __init pti_setup_vsyscall(void)
 static void __init pti_setup_vsyscall(void) { }
 #endif
 
+enum pti_clone_level {
+       PTI_CLONE_PMD,
+       PTI_CLONE_PTE,
+};
+
 static void
-pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+pti_clone_pgtable(unsigned long start, unsigned long end,
+                 enum pti_clone_level level)
 {
        unsigned long addr;
 
@@ -291,59 +315,105 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
         * Clone the populated PMDs which cover start to end. These PMD areas
         * can have holes.
         */
-       for (addr = start; addr < end; addr += PMD_SIZE) {
+       for (addr = start; addr < end;) {
+               pte_t *pte, *target_pte;
                pmd_t *pmd, *target_pmd;
                pgd_t *pgd;
                p4d_t *p4d;
                pud_t *pud;
 
+               /* Overflow check */
+               if (addr < start)
+                       break;
+
                pgd = pgd_offset_k(addr);
                if (WARN_ON(pgd_none(*pgd)))
                        return;
                p4d = p4d_offset(pgd, addr);
                if (WARN_ON(p4d_none(*p4d)))
                        return;
+
                pud = pud_offset(p4d, addr);
-               if (pud_none(*pud))
+               if (pud_none(*pud)) {
+                       addr += PUD_SIZE;
                        continue;
+               }
+
                pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd))
+               if (pmd_none(*pmd)) {
+                       addr += PMD_SIZE;
                        continue;
+               }
 
-               target_pmd = pti_user_pagetable_walk_pmd(addr);
-               if (WARN_ON(!target_pmd))
-                       return;
-
-               /*
-                * Only clone present PMDs.  This ensures only setting
-                * _PAGE_GLOBAL on present PMDs.  This should only be
-                * called on well-known addresses anyway, so a non-
-                * present PMD would be a surprise.
-                */
-               if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
-                       return;
-
-               /*
-                * Setting 'target_pmd' below creates a mapping in both
-                * the user and kernel page tables.  It is effectively
-                * global, so set it as global in both copies.  Note:
-                * the X86_FEATURE_PGE check is not _required_ because
-                * the CPU ignores _PAGE_GLOBAL when PGE is not
-                * supported.  The check keeps consistentency with
-                * code that only set this bit when supported.
-                */
-               if (boot_cpu_has(X86_FEATURE_PGE))
-                       *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
-
-               /*
-                * Copy the PMD.  That is, the kernelmode and usermode
-                * tables will share the last-level page tables of this
-                * address range
-                */
-               *target_pmd = pmd_clear_flags(*pmd, clear);
+               if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+                       target_pmd = pti_user_pagetable_walk_pmd(addr);
+                       if (WARN_ON(!target_pmd))
+                               return;
+
+                       /*
+                        * Only clone present PMDs.  This ensures only setting
+                        * _PAGE_GLOBAL on present PMDs.  This should only be
+                        * called on well-known addresses anyway, so a non-
+                        * present PMD would be a surprise.
+                        */
+                       if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+                               return;
+
+                       /*
+                        * Setting 'target_pmd' below creates a mapping in both
+                        * the user and kernel page tables.  It is effectively
+                        * global, so set it as global in both copies.  Note:
+                        * the X86_FEATURE_PGE check is not _required_ because
+                        * the CPU ignores _PAGE_GLOBAL when PGE is not
+                        * supported.  The check keeps consistency with
+                        * code that only sets this bit when supported.
+                        */
+                       if (boot_cpu_has(X86_FEATURE_PGE))
+                               *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+                       /*
+                        * Copy the PMD.  That is, the kernelmode and usermode
+                        * tables will share the last-level page tables of this
+                        * address range
+                        */
+                       *target_pmd = *pmd;
+
+                       addr += PMD_SIZE;
+
+               } else if (level == PTI_CLONE_PTE) {
+
+                       /* Walk the page-table down to the pte level */
+                       pte = pte_offset_kernel(pmd, addr);
+                       if (pte_none(*pte)) {
+                               addr += PAGE_SIZE;
+                               continue;
+                       }
+
+                       /* Only clone present PTEs */
+                       if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
+                               return;
+
+                       /* Allocate PTE in the user page-table */
+                       target_pte = pti_user_pagetable_walk_pte(addr);
+                       if (WARN_ON(!target_pte))
+                               return;
+
+                       /* Set GLOBAL bit in both PTEs */
+                       if (boot_cpu_has(X86_FEATURE_PGE))
+                               *pte = pte_set_flags(*pte, _PAGE_GLOBAL);
+
+                       /* Clone the PTE */
+                       *target_pte = *pte;
+
+                       addr += PAGE_SIZE;
+
+               } else {
+                       BUG();
+               }
        }
 }
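Note how the rewritten walk advances: holes are skipped in PUD_SIZE or
PMD_SIZE steps, each cloned PMD advances the cursor by PMD_SIZE, and the
PTE-level path advances one PAGE_SIZE at a time; the explicit addr < start
check guards against wrapping when end sits at the top of the address
space.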
 
+#ifdef CONFIG_X86_64
 /*
  * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
  * next-level entry on 5-level systems).
@@ -354,6 +424,9 @@ static void __init pti_clone_p4d(unsigned long addr)
        pgd_t *kernel_pgd;
 
        user_p4d = pti_user_pagetable_walk_p4d(addr);
+       if (!user_p4d)
+               return;
+
        kernel_pgd = pgd_offset_k(addr);
        kernel_p4d = p4d_offset(kernel_pgd, addr);
        *user_p4d = *kernel_p4d;
@@ -367,6 +440,25 @@ static void __init pti_clone_user_shared(void)
        pti_clone_p4d(CPU_ENTRY_AREA_BASE);
 }
 
+#else /* CONFIG_X86_64 */
+
+/*
+ * On 32 bit PAE systems with 1GB of kernel address space there is only
+ * one pgd/p4d for the whole kernel. Cloning that would map the whole
+ * address space into the user page-tables, making PTI useless. So clone
+ * the page-table on the PMD level to prevent that.
+ */
+static void __init pti_clone_user_shared(void)
+{
+       unsigned long start, end;
+
+       start = CPU_ENTRY_AREA_BASE;
+       end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+
+       pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+}
+#endif /* CONFIG_X86_64 */
+
 /*
  * Clone the ESPFIX P4D into the user space visible page table
  */
@@ -380,11 +472,11 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry and irqentry text and force it RO.
  */
-static void __init pti_clone_entry_text(void)
+static void pti_clone_entry_text(void)
 {
-       pti_clone_pmds((unsigned long) __entry_text_start,
-                       (unsigned long) __irqentry_text_end,
-                      _PAGE_RW);
+       pti_clone_pgtable((unsigned long) __entry_text_start,
+                         (unsigned long) __irqentry_text_end,
+                         PTI_CLONE_PMD);
 }
 
 /*
@@ -434,11 +526,18 @@ static inline bool pti_kernel_image_global_ok(void)
        return true;
 }
 
+/*
+ * This is the only user for these and it is not arch-generic
+ * like the other set_memory.h functions.  Just extern them.
+ */
+extern int set_memory_nonglobal(unsigned long addr, int numpages);
+extern int set_memory_global(unsigned long addr, int numpages);
+
 /*
  * For some configurations, map all of kernel text into the user page
  * tables.  This reduces TLB misses, especially on non-PCID systems.
  */
-void pti_clone_kernel_text(void)
+static void pti_clone_kernel_text(void)
 {
        /*
         * rodata is part of the kernel image and is normally
@@ -446,7 +545,8 @@ void pti_clone_kernel_text(void)
         * clone the areas past rodata, they might contain secrets.
         */
        unsigned long start = PFN_ALIGN(_text);
-       unsigned long end = (unsigned long)__end_rodata_hpage_align;
+       unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
+       unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
 
        if (!pti_kernel_image_global_ok())
                return;
@@ -458,14 +558,18 @@ void pti_clone_kernel_text(void)
         * pti_set_kernel_image_nonglobal() did to clear the
         * global bit.
         */
-       pti_clone_pmds(start, end, _PAGE_RW);
+       pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+
+       /*
+        * pti_clone_pgtable() will set the global bit in any PMDs
+        * that it clones, but we also need to get any PTEs in
+        * the last level for areas that are not huge-page-aligned.
+        */
+
+       /* Set the global bit for normal non-__init kernel text: */
+       set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
 }
 
-/*
- * This is the only user for it and it is not arch-generic like
- * the other set_memory.h functions.  Just extern it.
- */
-extern int set_memory_nonglobal(unsigned long addr, int numpages);
 void pti_set_kernel_image_nonglobal(void)
 {
        /*
@@ -477,9 +581,11 @@ void pti_set_kernel_image_nonglobal(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
 
-       if (pti_kernel_image_global_ok())
-               return;
-
+       /*
+        * This clears _PAGE_GLOBAL from the entire kernel image.
+        * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
+        * areas that are mapped to userspace.
+        */
        set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
 }
 
@@ -493,6 +599,28 @@ void __init pti_init(void)
 
        pr_info("enabled\n");
 
+#ifdef CONFIG_X86_32
+       /*
+        * We check for X86_FEATURE_PCID here. But the init-code will
+        * clear the feature flag on 32 bit because the feature is not
+        * supported on 32 bit anyway. To print the warning we need to
+        * check with cpuid directly again.
+        */
+       if (cpuid_ecx(0x1) & BIT(17)) {
+               /* Use printk to work around pr_fmt() */
+               printk(KERN_WARNING "\n");
+               printk(KERN_WARNING "************************************************************\n");
+               printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
+               printk(KERN_WARNING "**                                                        **\n");
+               printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
+               printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
+               printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
+               printk(KERN_WARNING "**                                                        **\n");
+               printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
+               printk(KERN_WARNING "************************************************************\n");
+       }
+#endif
+
        pti_clone_user_shared();
 
        /* Undo all global bits from the init pagetables in head_64.S: */
@@ -502,3 +630,22 @@ void __init pti_init(void)
        pti_setup_espfix64();
        pti_setup_vsyscall();
 }
+
+/*
+ * Finalize the kernel mappings in the userspace page-table. Some of the
+ * mappings for the kernel image might have changed since pti_init()
+ * cloned them. This is because parts of the kernel image have been
+ * mapped RO and/or NX.  These changes need to be cloned again to the
+ * userspace page-table.
+ */
+void pti_finalize(void)
+{
+       /*
+        * We need to clone everything (again) that maps parts of the
+        * kernel image.
+        */
+       pti_clone_entry_text();
+       pti_clone_kernel_text();
+
+       debug_checkwx_user();
+}
index 6eb1f34c3c858fcf72336a5e78c6b924f0179f7e..752dbf4e0e5071b9a71908822231ceda5940dabf 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/gfp.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -35,7 +36,7 @@
  * necessary invalidation by clearing out the 'ctx_id' which
  * forces a TLB flush when the context is loaded.
  */
-void clear_asid_other(void)
+static void clear_asid_other(void)
 {
        u16 asid;
 
@@ -185,8 +186,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 {
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+       bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;
+       bool need_flush;
+       u16 new_asid;
 
        /*
         * NB: The scheduler will call us with prev == next when switching
@@ -240,20 +244,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                           next->context.ctx_id);
 
                /*
-                * We don't currently support having a real mm loaded without
-                * our cpu set in mm_cpumask().  We have all the bookkeeping
-                * in place to figure out whether we would need to flush
-                * if our cpu were cleared in mm_cpumask(), but we don't
-                * currently use it.
+                * Even in lazy TLB mode, the CPU should stay set in the
+                * mm_cpumask. The TLB shootdown code can figure out
+                * from cpu_tlbstate.is_lazy whether or not to send an IPI.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
                        cpumask_set_cpu(cpu, mm_cpumask(next));
 
-               return;
+               /*
+                * If the CPU is not in lazy TLB mode, we are just switching
+                * from one thread in a process to another thread in the same
+                * process. No TLB flush required.
+                */
+               if (!was_lazy)
+                       return;
+
+               /*
+                * Read the tlb_gen to check whether a flush is needed.
+                * If the TLB is up to date, just use it.
+                * The barrier synchronizes with the tlb_gen increment in
+                * the TLB shootdown code.
+                */
+               smp_mb();
+               next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+               if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+                               next_tlb_gen)
+                       return;
+
+               /*
+                * TLB contents went out of date while we were in lazy
+                * mode. Fall through to the TLB switching code below.
+                */
+               new_asid = prev_asid;
+               need_flush = true;
        } else {
-               u16 new_asid;
-               bool need_flush;
                u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
                /*
@@ -285,53 +310,60 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        sync_current_stack_to_mm(next);
                }
 
-               /* Stop remote flushes for the previous mm */
-               VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
-                               real_prev != &init_mm);
-               cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+               /*
+                * Stop remote flushes for the previous mm.
+                * Skip kernel threads; we never send init_mm TLB flushing IPIs,
+                * but the bitmap manipulation can cause cache line contention.
+                */
+               if (real_prev != &init_mm) {
+                       VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
+                                               mm_cpumask(real_prev)));
+                       cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+               }
 
                /*
                 * Start remote flushes and then read tlb_gen.
                 */
-               cpumask_set_cpu(cpu, mm_cpumask(next));
+               if (next != &init_mm)
+                       cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+       }
 
-               if (need_flush) {
-                       this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
-                       this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                       load_new_mm_cr3(next->pgd, new_asid, true);
-
-                       /*
-                        * NB: This gets called via leave_mm() in the idle path
-                        * where RCU functions differently.  Tracing normally
-                        * uses RCU, so we need to use the _rcuidle variant.
-                        *
-                        * (There is no good reason for this.  The idle code should
-                        *  be rearranged to call this before rcu_idle_enter().)
-                        */
-                       trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-               } else {
-                       /* The new ASID is already up to date. */
-                       load_new_mm_cr3(next->pgd, new_asid, false);
-
-                       /* See above wrt _rcuidle. */
-                       trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-               }
+       if (need_flush) {
+               this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+               this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+               load_new_mm_cr3(next->pgd, new_asid, true);
 
                /*
-                * Record last user mm's context id, so we can avoid
-                * flushing branch buffer with IBPB if we switch back
-                * to the same user.
+                * NB: This gets called via leave_mm() in the idle path
+                * where RCU functions differently.  Tracing normally
+                * uses RCU, so we need to use the _rcuidle variant.
+                *
+                * (There is no good reason for this.  The idle code should
+                *  be rearranged to call this before rcu_idle_enter().)
                 */
-               if (next != &init_mm)
-                       this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+               trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+       } else {
+               /* The new ASID is already up to date. */
+               load_new_mm_cr3(next->pgd, new_asid, false);
 
-               this_cpu_write(cpu_tlbstate.loaded_mm, next);
-               this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+               /* See above wrt _rcuidle. */
+               trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
        }
 
+       /*
+        * Record last user mm's context id, so we can avoid
+        * flushing branch buffer with IBPB if we switch back
+        * to the same user.
+        */
+       if (next != &init_mm)
+               this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+       this_cpu_write(cpu_tlbstate.loaded_mm, next);
+       this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
        load_mm_cr4(next);
        switch_ldt(real_prev, next);
 }
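The restructuring gives switch_mm_irqs_off() two cheap exits and one shared
tail: a thread switch within the same non-lazy mm returns immediately; a
return to a lazy mm reuses prev_asid and skips the flush entirely when
ctxs[prev_asid].tlb_gen already matches the mm's tlb_gen (the smp_mb()
pairs with the tlb_gen increment on the shootdown side, per the comment
above); everything else falls through to the common CR3-load and
bookkeeping code that used to be duplicated in the else branch.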
@@ -354,20 +386,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;
 
-       if (tlb_defer_switch_to_init_mm()) {
-               /*
-                * There's a significant optimization that may be possible
-                * here.  We have accurate enough TLB flush tracking that we
-                * don't need to maintain coherence of TLB per se when we're
-                * lazy.  We do, however, need to maintain coherence of
-                * paging-structure caches.  We could, in principle, leave our
-                * old mm loaded and only switch to init_mm when
-                * tlb_remove_page() happens.
-                */
-               this_cpu_write(cpu_tlbstate.is_lazy, true);
-       } else {
-               switch_mm(NULL, &init_mm, NULL);
-       }
+       this_cpu_write(cpu_tlbstate.is_lazy, true);
 }
 
 /*
@@ -454,6 +473,9 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB.  Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
+                *
+                * This should be rare, with native_flush_tlb_others skipping
+                * IPIs to lazy TLB mode CPUs.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
@@ -560,6 +582,9 @@ static void flush_tlb_func_remote(void *info)
 void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
 {
+       cpumask_var_t lazymask;
+       unsigned int cpu;
+
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -583,8 +608,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
-               unsigned int cpu;
-
                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
@@ -592,8 +615,29 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                                               (void *)info, 1);
                return;
        }
-       smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+       /*
+        * A temporary cpumask is used in order to skip sending IPIs
+        * to CPUs in lazy TLB state, while keeping them in mm_cpumask(mm).
+        * If the allocation fails, simply IPI every CPU in mm_cpumask.
+        */
+       if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
+               smp_call_function_many(cpumask, flush_tlb_func_remote,
                               (void *)info, 1);
+               return;
+       }
+
+       cpumask_copy(lazymask, cpumask);
+
+       for_each_cpu(cpu, lazymask) {
+               if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+                       cpumask_clear_cpu(cpu, lazymask);
+       }
+
+       smp_call_function_many(lazymask, flush_tlb_func_remote,
+                              (void *)info, 1);
+
+       free_cpumask_var(lazymask);
 }
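The fallback is a deliberate design choice: the lazymask allocation is
GFP_ATOMIC because this path may not sleep, and if it fails the code simply
IPIs every CPU in the original mask. That is always correct, just slower; a
flush IPI delivered to a lazy CPU is harmless, so skipping those IPIs is
purely an optimization.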
 
 /*
@@ -646,6 +690,68 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        put_cpu();
 }
 
+void tlb_flush_remove_tables_local(void *arg)
+{
+       struct mm_struct *mm = arg;
+
+       if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
+                       this_cpu_read(cpu_tlbstate.is_lazy)) {
+               /*
+                * We're in lazy mode.  We need to at least flush our
+                * paging-structure cache to avoid speculatively reading
+                * garbage into our TLB.  Since switching to init_mm is barely
+                * slower than a minimal flush, just switch to init_mm.
+                */
+               switch_mm_irqs_off(NULL, &init_mm, NULL);
+       }
+}
+
+static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
+                                     struct cpumask *lazy_cpus)
+{
+       int cpu;
+
+       for_each_cpu(cpu, mm_cpumask(mm)) {
+               if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+                       cpumask_set_cpu(cpu, lazy_cpus);
+       }
+}
+
+void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+       int cpu = get_cpu();
+       cpumask_var_t lazy_cpus;
+
+       if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
+               put_cpu();
+               return;
+       }
+
+       if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
+               /*
+                * If the cpumask allocation fails, do a brute force flush
+                * on all the CPUs that have this mm loaded.
+                */
+               smp_call_function_many(mm_cpumask(mm),
+                               tlb_flush_remove_tables_local, (void *)mm, 1);
+               put_cpu();
+               return;
+       }
+
+       /*
+        * CPUs with !is_lazy either received a TLB flush IPI while the user
+        * pages in this address range were unmapped, or have context switched
+        * and reloaded %CR3 since then.
+        *
+        * Shootdown IPIs at page table freeing time only need to be sent to
+        * CPUs that may have out of date TLB contents.
+        */
+       mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
+       smp_call_function_many(lazy_cpus,
+                               tlb_flush_remove_tables_local, (void *)mm, 1);
+       free_cpumask_var(lazy_cpus);
+       put_cpu();
+}
 
 static void do_flush_tlb_all(void *info)
 {
index 55799873ebe53375b8cf492af137461bba00b9b8..8f6cc71e08482b05ddb53efce7ee3b818b1061f2 100644 (file)
@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)
 
        /* sub esp,STACK_SIZE */
        EMIT2_off32(0x81, 0xEC, STACK_SIZE);
-       /* sub ebp,SCRATCH_SIZE+4+12*/
-       EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16);
+       /* sub ebp,SCRATCH_SIZE+12*/
+       EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
        /* xor ebx,ebx */
        EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
 
@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
        /* mov edx,dword ptr [ebp+off]*/
        EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
 
-       /* add ebp,SCRATCH_SIZE+4+12*/
-       EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16);
+       /* add ebp,SCRATCH_SIZE+12*/
+       EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
 
        /* mov ebx,dword ptr [ebp-12]*/
        EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
index e01f7ceb9e7a17436eb71634c5467bbb20a2a2de..ee5d08f25ce45f21aa81550ce317760a2c745900 100644 (file)
@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
-               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+               if (!pgd_present(*pgd))
                        continue;
 
                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
 
-                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                       if (!p4d_present(*p4d))
                                continue;
 
                        pud = (pud_t *)p4d_page_vaddr(*p4d);
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;
 
-       if (sev_active())
+       if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
                flags |= _PAGE_ENC;
 
        pfn = md->phys_addr >> PAGE_SHIFT;
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
 #ifdef CONFIG_EFI_MIXED
 extern efi_status_t efi64_thunk(u32, ...);
 
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
 #define runtime_service32(func)                                                 \
 ({                                                                      \
        u32 table = (u32)(unsigned long)efi.systab;                      \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
 #define efi_thunk(f, ...)                                              \
 ({                                                                     \
        efi_status_t __s;                                               \
-       unsigned long __flags;                                          \
        u32 __func;                                                     \
                                                                        \
-       local_irq_save(__flags);                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __func = runtime_service32(f);                                  \
        __s = efi64_thunk(__func, __VA_ARGS__);                         \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
-       local_irq_restore(__flags);                                     \
                                                                        \
        __s;                                                            \
 })
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
        u32 phys_tm, phys_tc;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);
 
        status = efi_thunk(get_time, phys_tm, phys_tc);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_time, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 {
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
        status = efi_thunk(get_wakeup_time, phys_enabled,
                             phys_pending, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_wakeup_time, enabled, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 {
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
+       phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+       phys_vendor = virt_to_phys_or_null(vendor);
+       phys_data = virt_to_phys_or_null_size(data, data_size);
+
+       /* If data_size is > sizeof(u32) we've got problems */
+       status = efi_thunk(set_variable, phys_name, phys_vendor,
+                          attr, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+                                  u32 attr, unsigned long data_size,
+                                  void *data)
+{
+       u32 phys_name, phys_vendor, phys_data;
+       efi_status_t status;
+       unsigned long flags;
+
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
 
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 {
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 {
        efi_status_t status;
        u32 phys_count;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
 {
        u32 phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data = virt_to_phys_or_null_size(data, data_size);
 
        efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
 }
 
 static efi_status_t
@@ -872,10 +933,40 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 {
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
+
+       if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+               return EFI_UNSUPPORTED;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
+       phys_storage = virt_to_phys_or_null(storage_space);
+       phys_remaining = virt_to_phys_or_null(remaining_space);
+       phys_max = virt_to_phys_or_null(max_variable_size);
+
+       status = efi_thunk(query_variable_info, attr, phys_storage,
+                          phys_remaining, phys_max);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+                                         u64 *remaining_space,
+                                         u64 *max_variable_size)
+{
+       efi_status_t status;
+       u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
 
        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;
 
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
+
        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +974,8 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
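A hedged distillation of the *_nonblocking convention introduced above
(generic body, not a function from this diff): callers that must not wait
on efi_runtime_lock get a trylock variant that backs off with EFI_NOT_READY
instead of blocking:

	/* Sketch of the nonblocking pattern; the thunk call is elided. */
	static efi_status_t example_service_nonblocking(void)
	{
		unsigned long flags;

		if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
			return EFI_NOT_READY;	/* caller may retry later */

		/* ... virt_to_phys_or_null() conversions and efi_thunk() ... */

		spin_unlock_irqrestore(&efi_runtime_lock, flags);
		return EFI_SUCCESS;
	}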
 
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
+       efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
+       efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
 }
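
The thunk hunks above all apply one locking pattern: every runtime service serializes on efi_runtime_lock with spin_lock_irqsave(), and each *_nonblocking twin swaps the blocking acquire for a trylock so callers in atomic context can bail out instead of spinning. A minimal sketch of the pairing, assuming a hypothetical my_runtime_lock and a stubbed-out firmware call:

    #include <linux/spinlock.h>
    #include <linux/efi.h>

    static DEFINE_SPINLOCK(my_runtime_lock);

    static efi_status_t my_service(void)
    {
            unsigned long flags;
            efi_status_t status;

            spin_lock_irqsave(&my_runtime_lock, flags);     /* may spin */
            status = EFI_SUCCESS;                   /* firmware call goes here */
            spin_unlock_irqrestore(&my_runtime_lock, flags);
            return status;
    }

    static efi_status_t my_service_nonblocking(void)
    {
            unsigned long flags;
            efi_status_t status;

            /* Refuse to spin; the caller retries later or gives up. */
            if (!spin_trylock_irqsave(&my_runtime_lock, flags))
                    return EFI_NOT_READY;
            status = EFI_SUCCESS;                   /* firmware call goes here */
            spin_unlock_irqrestore(&my_runtime_lock, flags);
            return status;
    }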
index 36c1f8b9f7e0c2df3dec1828e9f786f9d6c1ecfb..844d31cb8a0c7eae1dcb37ed48fa373564e83f22 100644 (file)
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
 */
 void efi_delete_dummy_variable(void)
 {
-       efi.set_variable((efi_char16_t *)efi_dummy_name,
-                        &EFI_DUMMY_GUID,
-                        EFI_VARIABLE_NON_VOLATILE |
-                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                        EFI_VARIABLE_RUNTIME_ACCESS,
-                        0, NULL);
+       efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+                                    &EFI_DUMMY_GUID,
+                                    EFI_VARIABLE_NON_VOLATILE |
+                                    EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                                    EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
 }
 
 /*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
        int num_entries;
        void *new;
 
-       if (efi_mem_desc_lookup(addr, &md)) {
+       if (efi_mem_desc_lookup(addr, &md) ||
+           md.type != EFI_BOOT_SERVICES_DATA) {
                pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
                return;
        }
index fa021dfab0882122189b51d15d32df8c7f503849..5cf886c867c243166509c5a3e9d85512145a3e5b 100644 (file)
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o
 
 # SFI specific code
 ifdef CONFIG_X86_INTEL_MID
index 2ebdf31d999637b5649abc2371302f9aa92a8eac..56f66eafb94fd5b8d4e2a321525309b2a41e9bd6 100644 (file)
@@ -36,8 +36,6 @@
 #include <asm/apb_timer.h>
 #include <asm/reboot.h>
 
-#include "intel_mid_weak_decls.h"
-
 /*
  * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
  * cmdline option x86_intel_mid_timer can be used to override the configuration
 
 enum intel_mid_timer_options intel_mid_timer_options;
 
-/* intel_mid_ops to store sub arch ops */
-static struct intel_mid_ops *intel_mid_ops;
-/* getter function for sub arch ops*/
-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
 enum intel_mid_cpu_type __intel_mid_cpu_chip;
 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
 
@@ -82,11 +76,6 @@ static void intel_mid_reboot(void)
        intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
 }
 
-static unsigned long __init intel_mid_calibrate_tsc(void)
-{
-       return 0;
-}
-
 static void __init intel_mid_setup_bp_timer(void)
 {
        apbt_time_init();
@@ -133,6 +122,7 @@ static void intel_mid_arch_setup(void)
        case 0x3C:
        case 0x4A:
                __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+               x86_platform.legacy.rtc = 1;
                break;
        case 0x27:
        default:
@@ -140,17 +130,7 @@ static void intel_mid_arch_setup(void)
                break;
        }
 
-       if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
-               intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
-       else {
-               intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-               pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
-       }
-
 out:
-       if (intel_mid_ops->arch_setup)
-               intel_mid_ops->arch_setup();
-
        /*
         * Intel MID platforms are using explicitly defined regulators.
         *
@@ -191,7 +171,6 @@ void __init x86_intel_mid_early_setup(void)
 
        x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
 
-       x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
        x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
 
        x86_init.pci.arch_init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
deleted file mode 100644 (file)
index 3c1c386..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-
-/* For every CPU addition a new get_<cpuname>_ops interface needs
- * to be added.
- */
-extern void *get_penwell_ops(void);
-extern void *get_cloverview_ops(void);
-extern void *get_tangier_ops(void);
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
deleted file mode 100644 (file)
index e42978d..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * mfld.c: Intel Medfield platform setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init mfld_calibrate_tsc(void)
-{
-       unsigned long fast_calibrate;
-       u32 lo, hi, ratio, fsb;
-
-       rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
-       pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
-       ratio = (hi >> 8) & 0x1f;
-       pr_debug("ratio is %d\n", ratio);
-       if (!ratio) {
-               pr_err("read a zero ratio, should be incorrect!\n");
-               pr_err("force tsc ratio to 16 ...\n");
-               ratio = 16;
-       }
-       rdmsr(MSR_FSB_FREQ, lo, hi);
-       if ((lo & 0x7) == 0x7)
-               fsb = FSB_FREQ_83SKU;
-       else
-               fsb = FSB_FREQ_100SKU;
-       fast_calibrate = ratio * fsb;
-       pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
-       lapic_timer_frequency = fsb * 1000 / HZ;
-
-       /*
-        * TSC on Intel Atom SoCs is reliable and of known frequency.
-        * See tsc_msr.c for details.
-        */
-       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
-       setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
-       return fast_calibrate;
-}
-
-static void __init penwell_arch_setup(void)
-{
-       x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-}
-
-static struct intel_mid_ops penwell_ops = {
-       .arch_setup = penwell_arch_setup,
-};
-
-void *get_penwell_ops(void)
-{
-       return &penwell_ops;
-}
-
-void *get_cloverview_ops(void)
-{
-       return &penwell_ops;
-}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
deleted file mode 100644 (file)
index ae7bdeb..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Intel Merrifield platform specific setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init tangier_calibrate_tsc(void)
-{
-       unsigned long fast_calibrate;
-       u32 lo, hi, ratio, fsb, bus_freq;
-
-       /* *********************** */
-       /* Compute TSC:Ratio * FSB */
-       /* *********************** */
-
-       /* Compute Ratio */
-       rdmsr(MSR_PLATFORM_INFO, lo, hi);
-       pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
-
-       ratio = (lo >> 8) & 0xFF;
-       pr_debug("ratio is %d\n", ratio);
-       if (!ratio) {
-               pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
-               ratio = 4;
-       }
-
-       /* Compute FSB */
-       rdmsr(MSR_FSB_FREQ, lo, hi);
-       pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
-                       hi, lo);
-
-       bus_freq = lo & 0x7;
-       pr_debug("bus_freq = 0x%x\n", bus_freq);
-
-       if (bus_freq == 0)
-               fsb = FSB_FREQ_100SKU;
-       else if (bus_freq == 1)
-               fsb = FSB_FREQ_100SKU;
-       else if (bus_freq == 2)
-               fsb = FSB_FREQ_133SKU;
-       else if (bus_freq == 3)
-               fsb = FSB_FREQ_167SKU;
-       else if (bus_freq == 4)
-               fsb = FSB_FREQ_83SKU;
-       else if (bus_freq == 5)
-               fsb = FSB_FREQ_400SKU;
-       else if (bus_freq == 6)
-               fsb = FSB_FREQ_267SKU;
-       else if (bus_freq == 7)
-               fsb = FSB_FREQ_333SKU;
-       else {
-               BUG();
-               pr_err("Invalid bus_freq! Setting to minimal value!\n");
-               fsb = FSB_FREQ_100SKU;
-       }
-
-       /* TSC = FSB Freq * Resolved HFM Ratio */
-       fast_calibrate = ratio * fsb;
-       pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
-
-       /* ************************************ */
-       /* Calculate Local APIC Timer Frequency */
-       /* ************************************ */
-       lapic_timer_frequency = (fsb * 1000) / HZ;
-
-       pr_debug("Setting lapic_timer_frequency = %d\n",
-                       lapic_timer_frequency);
-
-       /*
-        * TSC on Intel Atom SoCs is reliable and of known frequency.
-        * See tsc_msr.c for details.
-        */
-       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
-       setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
-       return fast_calibrate;
-}
-
-static void __init tangier_arch_setup(void)
-{
-       x86_platform.calibrate_tsc = tangier_calibrate_tsc;
-       x86_platform.legacy.rtc = 1;
-}
-
-/* tangier arch ops */
-static struct intel_mid_ops tangier_ops = {
-       .arch_setup = tangier_arch_setup,
-};
-
-void *get_tangier_ops(void)
-{
-       return &tangier_ops;
-}
index 7c3077e58fa02f340e21f901ea0315716574ac62..f0e920fb98ad01f8657fd58c519e00a54f09bc99 100644 (file)
@@ -311,10 +311,8 @@ static int __init add_xo1_platform_devices(void)
                return PTR_ERR(pdev);
 
        pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
 
-       return 0;
+       return PTR_ERR_OR_ZERO(pdev);
 }
 
 static int olpc_xo1_ec_probe(struct platform_device *pdev)
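
The OLPC change above is a pure simplification: an IS_ERR()/PTR_ERR()/return-0 tail collapses into PTR_ERR_OR_ZERO(). A hedged sketch of the idiom; struct foo and foo_create() are invented stand-ins, not kernel APIs:

    #include <linux/err.h>

    struct foo { int dummy; };

    /* Stand-in for platform_device_register_simple(); always fails here. */
    static struct foo *foo_create(void)
    {
            return ERR_PTR(-ENODEV);
    }

    static int register_foo(void)
    {
            struct foo *f = foo_create();

            /* Same as: if (IS_ERR(f)) return PTR_ERR(f); return 0; */
            return PTR_ERR_OR_ZERO(f);
    }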
index ca446da48fd2872e262b789ebbfe12c2ec1d4976..e26dfad507c85a5a12300c3c058ee92ab13e63fa 100644 (file)
@@ -1607,8 +1607,6 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
                                *tunables[cnt].tunp = val;
                        continue;
                }
-               if (q == p)
-                       break;
        }
        return 0;
 }
index ce8da3a0412cbb1a715b56e4c2f41cc431fe9965..fd369a6e9ff8ce64be071448cb45f46e0de4d1fb 100644 (file)
@@ -137,7 +137,7 @@ ENTRY(restore_registers)
        /* Saved in save_processor_state. */
        lgdt    saved_context_gdt_desc(%rax)
 
-       xorq    %rax, %rax
+       xorl    %eax, %eax
 
        /* tell the hibernation core that we've just restored the memory */
        movq    %rax, in_suspend(%rip)
index 2e9ee023e6bcff25055bf5e05b0fc597d75f49bb..81a8e33115ad5b72d53fc0829930d60238dbb0c2 100644 (file)
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
        $(call if_changed_rule,cc_o_c)
 
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
index 220e97841e494c41e21b242a053b9ffcefeb2b4a..3a6c8ebc8032eb5c95e70af2e5e4f44000e29350 100644 (file)
@@ -67,6 +67,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        "__tracedata_(start|end)|"
        "__(start|stop)_notes|"
        "__end_rodata|"
+       "__end_rodata_aligned|"
        "__initramfs_start|"
        "(jiffies|jiffies_64)|"
 #if ELF_BITS == 64
index 744afdc18cf3a0ee8fab8624520fa8b2db2d2fd5..56c44d865f7bedb628aba3f0de218a1b5dcb9ccd 100644 (file)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
        if (!FIXADDR_USER_START)
                return 0;
 
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
index 9cac6d0721998e1956ff16a4c4c5d7b88b98adff..f8b69d84238eb4f7743c23b425e1a4900cb9cd7f 100644 (file)
@@ -1,2 +1 @@
-vdso-syms.lds
 vdso.lds
index b2d6967262b2ee328aefd28b29a8694b100c1214..822ccdba93adac4cef571cfd0308c871c3bc28f7 100644 (file)
@@ -53,22 +53,6 @@ $(vobjs): KBUILD_CFLAGS += $(CFL)
 CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage
 CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
 
-targets += vdso-syms.lds
-extra-$(VDSO64-y)                      += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
-       -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
-       $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
-       $(call if_changed,vdsosym)
-
 #
 # The DSO images are built using a special linker script.
 #
index c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a..3b5318505c69c487f8cfc9c46c93c526197caef6 100644 (file)
@@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info;
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
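
The section attribute on xen_start_flags is the standard trick for data touched before the BSS clear: the compiler is free to put an explicitly zero-initialized global in .bss, so anything written by code that runs before startup wipes BSS must be pinned into .data. A sketch, with early_flag as an invented name:

    #include <linux/types.h>

    /* Without the attribute this zero-initialized global could land in
     * .bss and be wiped when startup code clears BSS, losing any value
     * stored by very early boot code such as xen_prepare_pvh(). */
    uint32_t early_flag __attribute__((section(".data"))) = 0;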
index 357969a3697cc7af6e08c12144ec06f43a8841ad..105a57d73701b986bab193fc76f62019762d918f 100644 (file)
@@ -119,6 +119,27 @@ static void __init xen_banner(void)
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
+
+static void __init xen_pv_init_platform(void)
+{
+       set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+       HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+       /* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
+       xen_vcpu_info_reset(0);
+
+       /* pvclock is in shared info area */
+       xen_init_time_ops();
+}
+
+static void __init xen_pv_guest_late_init(void)
+{
+#ifndef CONFIG_SMP
+       /* Setup shared vcpu info for non-smp configurations */
+       xen_setup_vcpu_info_placement();
+#endif
+}
+
 /* Check if running on Xen version (major, minor) or later */
 bool
 xen_running_on_version_or_later(unsigned int major, unsigned int minor)
@@ -947,34 +968,8 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
        xen_write_msr_safe(msr, low, high);
 }
 
-void xen_setup_shared_info(void)
-{
-       set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
-
-       HYPERVISOR_shared_info =
-               (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
-       xen_setup_mfn_list_list();
-
-       if (system_state == SYSTEM_BOOTING) {
-#ifndef CONFIG_SMP
-               /*
-                * In UP this is as good a place as any to set up shared info.
-                * Limit this to boot only, at restore vcpu setup is done via
-                * xen_vcpu_restore().
-                */
-               xen_setup_vcpu_info_placement();
-#endif
-               /*
-                * Now that shared info is set up we can start using routines
-                * that point to pvclock area.
-                */
-               xen_init_time_ops();
-       }
-}
-
 /* This is called once we have the cpu_possible_mask */
-void __ref xen_setup_vcpu_info_placement(void)
+void __init xen_setup_vcpu_info_placement(void)
 {
        int cpu;
 
@@ -1203,15 +1198,24 @@ asmlinkage __visible void __init xen_start_kernel(void)
                return;
 
        xen_domain_type = XEN_PV_DOMAIN;
+       xen_start_flags = xen_start_info->flags;
 
        xen_setup_features();
 
-       xen_setup_machphys_mapping();
-
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops.patch = paravirt_patch_default;
        pv_cpu_ops = xen_cpu_ops;
+       xen_init_irq_ops();
+
+       /*
+        * Setup xen_vcpu early because it is needed for
+        * local_irq_disable(), irqs_disabled(), e.g. in printk().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
 
        x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
@@ -1219,15 +1223,19 @@ asmlinkage __visible void __init xen_start_kernel(void)
        x86_init.irqs.intr_mode_init    = x86_init_noop;
        x86_init.oem.arch_setup = xen_arch_setup;
        x86_init.oem.banner = xen_banner;
+       x86_init.hyper.init_platform = xen_pv_init_platform;
+       x86_init.hyper.guest_late_init = xen_pv_guest_late_init;
 
        /*
         * Set up some pagetable state before starting to set any ptes.
         */
 
+       xen_setup_machphys_mapping();
        xen_init_mmu_ops();
 
        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
+       __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
 
        /*
         * Prevent page tables from being allocated in highmem, even
@@ -1248,20 +1256,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
        get_cpu_cap(&boot_cpu_data);
        x86_configure_nx();
 
-       xen_init_irq_ops();
-
        /* Let's presume PV guests always boot on vCPU with id 0. */
        per_cpu(xen_vcpu_id, 0) = 0;
 
-       /*
-        * Setup xen_vcpu early because idt_setup_early_handler needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        idt_setup_early_handler();
 
        xen_init_capabilities();
index aa1c6a6831a94dd383e11c575a2a0c91f32136b5..c85d1a88f47693232369411588cfc19084086b25 100644 (file)
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
        }
 
        xen_pvh = 1;
+       xen_start_flags = pvh_start_info.flags;
 
        msr = cpuid_ebx(xen_cpuid_base() + 2);
        pfn = __pa(hypercall_page);
index 74179852e46c31108adf405e86230c3830add94a..7515a19fd324b54e15d5b6deb632e385913ce4fa 100644 (file)
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
 void __init xen_init_irq_ops(void)
 {
-       /* For PVH we use default pv_irq_ops settings. */
-       if (!xen_feature(XENFEAT_hvm_callback_vector))
-               pv_irq_ops = xen_irq_ops;
+       pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
 }
index 2c30cabfda90fb94b36c88f5f35aef91d8f119d0..52206ad81e4bcabe5d1173781397fdfa7c6d2b3a 100644 (file)
@@ -1230,8 +1230,7 @@ static void __init xen_pagetable_p2m_free(void)
         * We roundup to the PMD, which means that if anybody at this stage is
         * using the __ka address of xen_start_info or
         * xen_start_info->shared_info they are going to crash. Fortunately
-        * we have already revectored in xen_setup_kernel_pagetable and in
-        * xen_setup_shared_info.
+        * we have already revectored in xen_setup_kernel_pagetable.
         */
        size = roundup(size, PMD_SIZE);
 
@@ -1292,8 +1291,7 @@ static void __init xen_pagetable_init(void)
 
        /* Remap memory freed due to conflicts with E820 map */
        xen_remap_memory();
-
-       xen_setup_shared_info();
+       xen_setup_mfn_list_list();
 }
 static void xen_write_cr2(unsigned long cr2)
 {
index 2e20ae2fa2d6c3b865f2c745ad9896a752954907..e3b18ad49889afc5ae35d2e2796aecd108a93819 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>
 
+#include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);
 
+       speculative_store_bypass_ht_init();
+
        xen_setup_cpu_clockevents();
 
        notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
+       speculative_store_bypass_ht_init();
+
        xen_pmu_init(0);
 
        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
index a2e0f110af56ddac120b382691dcf61e70e3db14..8303b58c79a983085235a29c1b6aa653f11d1091 100644 (file)
@@ -27,8 +27,9 @@ void xen_pv_pre_suspend(void)
 void xen_pv_post_suspend(int suspend_cancelled)
 {
        xen_build_mfn_list_list();
-
-       xen_setup_shared_info();
+       set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+       HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+       xen_setup_mfn_list_list();
 
        if (suspend_cancelled) {
                xen_start_info->store_mfn =
index e0f1bcf01d639a6e845c1f25eb6ac85ccfac66c0..c84f1e039d849210b1e04517f62b9cce3ab2bddc 100644 (file)
@@ -31,6 +31,8 @@
 /* Xen may fire a timer up to this many ns early */
 #define TIMER_SLOP     100000
 
+static u64 xen_sched_clock_offset __read_mostly;
+
 /* Get the TSC speed from Xen */
 static unsigned long xen_tsc_khz(void)
 {
@@ -40,7 +42,7 @@ static unsigned long xen_tsc_khz(void)
        return pvclock_tsc_khz(info);
 }
 
-u64 xen_clocksource_read(void)
+static u64 xen_clocksource_read(void)
 {
         struct pvclock_vcpu_time_info *src;
        u64 ret;
@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
        return xen_clocksource_read();
 }
 
+static u64 xen_sched_clock(void)
+{
+       return xen_clocksource_read() - xen_sched_clock_offset;
+}
+
 static void xen_read_wallclock(struct timespec64 *ts)
 {
        struct shared_info *s = HYPERVISOR_shared_info;
@@ -367,7 +374,7 @@ void xen_timer_resume(void)
 }
 
 static const struct pv_time_ops xen_time_ops __initconst = {
-       .sched_clock = xen_clocksource_read,
+       .sched_clock = xen_sched_clock,
        .steal_clock = xen_steal_clock,
 };
 
@@ -503,8 +510,9 @@ static void __init xen_time_init(void)
                pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 }
 
-void __ref xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
 {
+       xen_sched_clock_offset = xen_clocksource_read();
        pv_time_ops = xen_time_ops;
 
        x86_init.timers.timer_init = xen_time_init;
@@ -542,11 +550,11 @@ void __init xen_hvm_init_time_ops(void)
                return;
 
        if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
-               printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
-                               "disable pv timer\n");
+               pr_info("Xen doesn't support pvclock on HVM, disabling pv timer");
                return;
        }
 
+       xen_sched_clock_offset = xen_clocksource_read();
        pv_time_ops = xen_time_ops;
        x86_init.timers.setup_percpu_clockev = xen_time_init;
        x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
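
Both init paths above now snapshot the clocksource once and make sched_clock() report time relative to that snapshot: the Xen pvclock counts from VM creation, while the scheduler expects a clock that starts near zero at guest boot. A sketch of the offset pattern, with read_raw_ns() as a stand-in for xen_clocksource_read():

    #include <linux/types.h>
    #include <linux/cache.h>

    static u64 sched_offset __read_mostly;

    static u64 read_raw_ns(void)            /* stand-in for the pvclock read */
    {
            return 0;                       /* real code reads the pvclock here */
    }

    static void init_my_sched_clock(void)
    {
            sched_offset = read_raw_ns();   /* snapshot taken once, at init */
    }

    static u64 my_sched_clock(void)
    {
            return read_raw_ns() - sched_offset;    /* ns since init */
    }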
index 3b34745d0a52dd097d9c2d8955bd4afce1d24ee1..e78684597f579ae0d5d90d952dbd1f46188f2f73 100644 (file)
@@ -31,7 +31,6 @@ extern struct shared_info xen_dummy_shared_info;
 extern struct shared_info *HYPERVISOR_shared_info;
 
 void xen_setup_mfn_list_list(void);
-void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
@@ -68,12 +67,11 @@ void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
-u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void xen_save_time_memory_area(void);
 void xen_restore_time_memory_area(void);
-void __ref xen_init_time_ops(void);
-void __init xen_hvm_init_time_ops(void);
+void xen_init_time_ops(void);
+void xen_hvm_init_time_ops(void);
 
 irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
 
index e7a23f2a519af7be68a9db30b4c112fb344efdda..7de0149e1cf7dd0e98c98f115b3f155edd34a7f7 100644 (file)
@@ -197,107 +197,9 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1,(v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1,(v))
-
-/**
- * atomic_dec_return - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
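
The macros and the __atomic_add_unless() loop removed above are now supplied by generic fallbacks built from the remaining primitives, so the architecture only defines what it implements natively. The deleted loop survives, in spirit, as the usual compare-and-swap retry; a sketch using atomic_try_cmpxchg(), which refreshes the expected value on failure (this mirrors the generic fallback, not xtensa-specific code):

    #include <linux/atomic.h>

    /* Add @a to @v unless @v == @u; returns the old value. */
    static inline int my_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            do {
                    if (c == u)
                            break;          /* hit the excluded value */
            } while (!atomic_try_cmpxchg(v, &c, c + a));

            return c;
    }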
index dbe3053b284a4286970decf9f3d966f53e7c3a28..9f119c1ca0b5d8dac50613b3fa27f5a1cb303451 100644 (file)
@@ -30,13 +30,16 @@ struct arch_hw_breakpoint {
        u16 type;
 };
 
+struct perf_event_attr;
 struct perf_event;
 struct pt_regs;
 struct task_struct;
 
 int hw_breakpoint_slots(int type);
-int arch_check_bp_in_kernelspace(struct perf_event *bp);
-int arch_validate_hwbkpt_settings(struct perf_event *bp);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw);
 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data);
 
index b35656ab7dbd184ba4bb099c9f716ea4b7ee9657..c2e387c19cdac7090828ee53cdd9424691d12cac 100644 (file)
@@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type)
        }
 }
 
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = bp->attr.bp_len;
+       va = hw->address;
+       len = hw->len;
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 /*
  * Construct an arch_hw_breakpoint from a perf_event.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->type = XTENSA_BREAKPOINT_EXECUTE;
+               hw->type = XTENSA_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->type = XTENSA_BREAKPOINT_LOAD;
+               hw->type = XTENSA_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->type = XTENSA_BREAKPOINT_STORE;
+               hw->type = XTENSA_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+               hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       info->len = bp->attr.bp_len;
-       if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
+       hw->len = attr->bp_len;
+       if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
                return -EINVAL;
 
        /* Address */
-       info->address = bp->attr.bp_addr;
-       if (info->address & (info->len - 1))
+       hw->address = attr->bp_addr;
+       if (hw->address & (hw->len - 1))
                return -EINVAL;
 
        return 0;
 }
 
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
-{
-       int ret;
-
-       /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
-       return ret;
-}
-
 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
 {
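
The refactor above splits validation away from the perf event: the arch parser fills a caller-provided scratch arch_hw_breakpoint from the attr instead of writing into the event, so new settings can be validated without clobbering a live breakpoint. A sketch of how a caller can use the new hook; build_bp() is an invented name:

    #include <linux/hw_breakpoint.h>

    static int build_bp(struct perf_event *bp)
    {
            struct arch_hw_breakpoint hw = { };
            int err;

            /* Parse into a scratch copy; commit only if parsing succeeded. */
            err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);
            if (err)
                    return err;

            *counter_arch_bp(bp) = hw;      /* counter_arch_bp(): &bp->hw.info */
            return 0;
    }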
index 9710e275f23079b8b7548ee935ab653036e82c5d..047c5dca6d90260f0a06f1a5d69b95d61a41460e 100644 (file)
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
 EXPORT_SYMBOL(bio_add_page);
 
 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * next non-empty segment of the iov iterator.
  */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
-       size_t offset, diff;
+       size_t offset;
        ssize_t size;
 
        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
        if (unlikely(size <= 0))
                return size ? size : -EFAULT;
-       nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
        /*
         * Deep magic below:  We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        bio->bi_iter.bi_size += size;
        bio->bi_vcnt += nr_pages;
 
-       diff = (nr_pages * PAGE_SIZE - offset) - size;
-       while (nr_pages--) {
-               bv[nr_pages].bv_page = pages[nr_pages];
-               bv[nr_pages].bv_len = PAGE_SIZE;
-               bv[nr_pages].bv_offset = 0;
+       while (idx--) {
+               bv[idx].bv_page = pages[idx];
+               bv[idx].bv_len = PAGE_SIZE;
+               bv[idx].bv_offset = 0;
        }
 
        bv[0].bv_offset += offset;
        bv[0].bv_len -= offset;
-       if (diff)
-               bv[bio->bi_vcnt - 1].bv_len -= diff;
+       bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
 
        iov_iter_advance(iter, size);
        return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whichever is smaller.
+ * If MM encounters an error while pinning pages, it stops early.
+ * An error is returned only if no pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+       unsigned short orig_vcnt = bio->bi_vcnt;
+
+       do {
+               int ret = __bio_iov_iter_get_pages(bio, iter);
+
+               if (unlikely(ret))
+                       return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+       } while (iov_iter_count(iter) && !bio_full(bio));
+
+       return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 
 static void submit_bio_wait_endio(struct bio *bio)
@@ -1807,9 +1834,6 @@ again:
        if (!bio_integrity_endio(bio))
                return;
 
-       if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
-               bio->bi_next = NULL;
-
        /*
         * Need to have a real endio function for chained bios, otherwise
         * various corner cases will break (like stacking block devices that
@@ -1869,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
                bio_integrity_trim(split);
 
        bio_advance(bio, split->bi_iter.bi_size);
+       bio->bi_iter.bi_done = 0;
 
        if (bio_flagged(bio, BIO_TRACE_COMPLETION))
                bio_set_flag(split, BIO_TRACE_COMPLETION);
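
With the loop added above, one call to bio_iov_iter_get_pages() may pin pages from several iovec segments, and a partial fill still counts as success. A sketch of a direct-I/O style caller under that contract; submit_one() is hypothetical:

    #include <linux/bio.h>
    #include <linux/uio.h>

    static void submit_one(struct bio *bio);        /* hand off to the device */

    static int pin_and_submit(struct iov_iter *iter)
    {
            while (iov_iter_count(iter)) {
                    struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
                    int ret = bio_iov_iter_get_pages(bio, iter);

                    if (ret) {              /* nothing at all could be pinned */
                            bio_put(bio);
                            return ret;
                    }
                    submit_one(bio);        /* a partially filled bio is fine */
            }
            return 0;
    }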
index cf0ee764b908b384f69be9efbb9d7a1352eb7a52..ee33590f54eb46ae439cb9790a3b71bb3cc23d0f 100644 (file)
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       /*
-        * XXX this code looks suspicious - it's not consistent with advancing
-        * req->bio in caller
-        */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
 }
@@ -2159,11 +2155,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
        if (part->policy && op_is_write(bio_op(bio))) {
                char b[BDEVNAME_SIZE];
 
-               printk(KERN_ERR
+               WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
                        bio_devname(bio, b), part->partno);
-               return true;
+               /* Older lvm-tools actually trigger this */
+               return false;
        }
 
        return false;
@@ -3081,10 +3078,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_iter.bi_size) {
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
-                       bio->bi_next = NULL;
-               }
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -3479,6 +3474,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
index ffa622366922fed04e9dbd2606ffa294b25a697b..1c4532e9293800662d92b809d78637e06607fd90 100644 (file)
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {
 
 static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
 {
-       if (WARN_ON_ONCE((unsigned int)rq_state >
+       if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
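
The fix above is a textbook off-by-one: for an array of N entries the valid indices are 0..N-1, so '>' let rq_state == ARRAY_SIZE() read one slot past the end. The same check in miniature:

    #include <linux/kernel.h>

    static const char *const names[] = { "idle", "in_flight", "complete" };

    static const char *name_of(unsigned int idx)
    {
            /* ARRAY_SIZE(names) == 3; idx == 3 must be rejected, which
             * the old '>' comparison failed to do. */
            if (idx >= ARRAY_SIZE(names))
                    return "(?)";
            return names[idx];
    }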
index 09b2ee6694fb16858a104b7021b986aff603a91a..3de0836163c2c5c4800677878a2116738fb43a9c 100644 (file)
@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
-       if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+       if (rq && blk_mq_request_started(rq))
                iter_data->fn(rq, iter_data->data, reserved);
 
        return true;
index 70c65bb6c0131c84130fae44808acb51cf427ace..654b0dc7e00191c5d61c35b249037d36a2c98932 100644 (file)
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
        bool shared = false;
        int cpu;
 
-       if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
-                       MQ_RQ_IN_FLIGHT)
+       if (!blk_mq_mark_complete(rq))
                return;
-
        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
 
@@ -781,7 +779,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
 
-       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
 }
 
@@ -1076,6 +1073,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 
 #define BLK_MQ_RESOURCE_DELAY  3               /* ms units */
 
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                             bool got_budget)
 {
@@ -1206,8 +1206,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                        blk_mq_run_hw_queue(hctx, true);
                else if (needs_restart && (ret == BLK_STS_RESOURCE))
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+               return false;
        }
 
+       /*
+        * If the host/device is unable to accept more work, inform the
+        * caller of that.
+        */
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               return false;
+
        return (queued + errors) != 0;
 }
 
index 01e2b353a2b9aadc2b5d20fe227739d1d0ae296b..15c1f5e12eb89460bc42eb7f5807eaa03254e51d 100644 (file)
@@ -144,6 +144,7 @@ do_local:
 
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__blk_complete_request);
 
 /**
  * blk_complete_request - end I/O on a request
index 4b8a48d48ba13394cf0ae7a7dc0016696ae5efd6..f2cfd56e1606ed9d8e1da979a1e1e6cdcb506a38 100644 (file)
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
        if (!req->timeout)
                req->timeout = q->rq_timeout;
 
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_rq_set_deadline(req, jiffies + req->timeout);
 
        /*
index 66602c48995643dcff921e6f10bba8cd203d3c5c..3da540faf6735c2c3ccb6a81ae2ffb4443aedd3c 100644 (file)
@@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
        } else if (hdr->din_xfer_len) {
                ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
                                hdr->din_xfer_len, GFP_KERNEL);
-       } else {
-               ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
        }
 
        if (ret)
index 945f4b8610e0c7d85242b500141d4bc1c0f671a2..e0de4dd448b3c7238e8656b572de72206302bf87 100644 (file)
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
index 150d82da8e996d8c28be4ae3e4bd64a0331c9c08..1efd6fa0dc608c2a3d598b56c798f3e772a2bdbc 100644 (file)
@@ -1,3 +1,3 @@
 #include <linux/kernel.h>
 
-extern const char __initdata *const blacklist_hashes[];
+extern const char __initconst *const blacklist_hashes[];
index 49fa8582138b2df45e087f3a31a80ac5d5bbdc2a..c166f424871c86a356b15eff8bdd3b2be6406a87 100644 (file)
@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
+/**
+ * af_alg_poll - poll system call handler
+ */
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct af_alg_ctx *ctx = ask->private;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (!ctx->more || ctx->used)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL_GPL(af_alg_poll_mask);
+EXPORT_SYMBOL_GPL(af_alg_poll);
 
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
@@ -1148,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-               if (err < 0)
+               if (err < 0) {
+                       rsgl->sg_num_bytes = 0;
                        return err;
+               }
 
                /* chain the new scatterlist with previous one */
                if (areq->last_rsgl)
index 825524f274384fdfd2a569be01e593d8f41a72b2..c40a8c7ee8aedcb0f6adb3afb1e0bb60a233d68c 100644 (file)
@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       aead_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int aead_check_key(struct socket *sock)
@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
        .sendmsg        =       aead_sendmsg_nokey,
        .sendpage       =       aead_sendpage_nokey,
        .recvmsg        =       aead_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
index 4c04eb9888adf82f68a18d17c9d6e73adc74aa90..cfdaab2b7d766d517e239687bf2232e09a749991 100644 (file)
@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
        .sendmsg        =       skcipher_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       skcipher_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int skcipher_check_key(struct socket *sock)
@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
        .sendmsg        =       skcipher_sendmsg_nokey,
        .sendpage       =       skcipher_sendpage_nokey,
        .recvmsg        =       skcipher_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
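
All of the algif converters above move from the short-lived ->poll_mask interface back to classic ->poll, whose handler must register itself on the socket's waitqueue via sock_poll_wait() before computing readiness. A minimal sketch of such a handler; my_data_ready() is invented:

    #include <net/sock.h>
    #include <linux/poll.h>

    static bool my_data_ready(struct sock *sk);     /* protocol-specific test */

    static __poll_t my_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
    {
            struct sock *sk = sock->sk;
            __poll_t mask = 0;

            sock_poll_wait(file, sk_sleep(sk), wait);   /* arm the waitqueue */
            if (my_data_ready(sk))
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }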
index 7d81e6bb461a330a225658dde9b002b3b24e26bc..b6cabac4b62ba6b920cb5947c56db5839711bcc7 100644 (file)
@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
                return -EINVAL;
        }
 
+       if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+               /* Discard the BIT STRING metadata */
+               if (vlen < 1 || *(const u8 *)value != 0)
+                       return -EBADMSG;
+
+               value++;
+               vlen--;
+       }
+
        ctx->cert->raw_sig = value;
        ctx->cert->raw_sig_size = vlen;
        return 0;
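
The check above reflects DER encoding: a BIT STRING's content begins with one metadata octet holding the count of unused bits in the final byte, which must be zero for a byte-aligned RSA signature, and that octet is not part of the signature itself. A sketch of the unwrap step; unwrap_bit_string() is an invented helper:

    /* Strip the leading unused-bits octet of a DER BIT STRING payload
     * (ITU-T X.690); non-zero values are rejected because signatures
     * are byte-aligned. */
    static int unwrap_bit_string(const unsigned char **data, unsigned long *len)
    {
            if (*len < 1 || (*data)[0] != 0)
                    return -1;
            (*data)++;
            (*len)--;
            return 0;
    }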
index 9fbcde307daf90b554ac5e96da627f0f77eb24e9..5eede3749e646b425614aa86de9143c82545fcc6 100644 (file)
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
                union morus640_block_in tail;
 
                memcpy(tail.bytes, src, size);
+               memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
 
-               crypto_morus640_load_a(&m, src);
+               crypto_morus640_load_a(&m, tail.bytes);
                crypto_morus640_core(state, &m);
                crypto_morus640_store_a(tail.bytes, &m);
                memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
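
The fix above makes the cipher consume the zero-padded stack copy of a partial tail block instead of reading MORUS640_BLOCK_SIZE bytes straight from src, which would run past the end of the input buffer. The general pattern, as a small userspace sketch with process_block() invented:

    #include <string.h>

    #define BLOCK_SIZE 16

    static void process_block(const unsigned char *blk);   /* cipher core */

    static void process_tail(const unsigned char *src, size_t size)
    {
            unsigned char tail[BLOCK_SIZE];

            memcpy(tail, src, size);                    /* size < BLOCK_SIZE */
            memset(tail + size, 0, BLOCK_SIZE - size);  /* pad with zeros */
            process_block(tail);    /* never dereference past src + size */
    }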
index 264ec12c0b9c334a16d743651771a18b616616a0..7f6735d9003f13c1e4adb7ffde8d8be5cc700fe1 100644 (file)
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
        st[24] ^= bc[ 4];
 }
 
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
        int round;
 
index 38a286975c31e152206b3e55b28473a2763a717a..9706613eecf9e2320209225b41d4856fecbf3ae1 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 
 #include "internal.h"
@@ -878,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
 
 static void lpss_iosf_enter_d3_state(void)
 {
@@ -920,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
 
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
+
+       lpss_iosf_d3_entered = true;
+
 exit:
        mutex_unlock(&lpss_iosf_mutex);
 }
@@ -934,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
 
        mutex_lock(&lpss_iosf_mutex);
 
+       if (!lpss_iosf_d3_entered)
+               goto exit;
+
+       lpss_iosf_d3_entered = false;
+
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
 
@@ -943,6 +953,7 @@ static void lpss_iosf_exit_d3_state(void)
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);
 
+exit:
        mutex_unlock(&lpss_iosf_mutex);
 }
 
@@ -961,7 +972,8 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
         * wrong status for devices being about to be powered off. See
         * lpss_iosf_enter_d3_state() for further information.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if (acpi_target_system_state() == ACPI_STATE_S0 &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();
 
        return ret;
index fc0c2e2328cd35218c71a2db56f3634cce4a0d46..fe9d46d81750792350270c4f6c1920a77e236a05 100644 (file)
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
                return_ACPI_STATUS(status);
        }
 
-       /*
-        * 1) Disable all GPEs
-        * 2) Enable all wakeup GPEs
-        */
+       /* Disable all GPEs */
        status = acpi_hw_disable_all_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
+       /*
+        * If the target sleep state is S5, clear all GPEs and fixed events too
+        */
+       if (sleep_state == ACPI_STATE_S5) {
+               status = acpi_hw_clear_acpi_status();
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
        acpi_gbl_system_awake_and_running = FALSE;
 
+        /* Enable all wakeup GPEs */
        status = acpi_hw_enable_all_wakeup_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
index bc5f05906bd1c871403896c3a2bce9122f3a67df..44f35ab3347d1a57aded2402e6e386c4d792d759 100644 (file)
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                        status =
                            acpi_ps_create_op(walk_state, aml_op_start, &op);
                        if (ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+                                * executing it as a control method. However, if we encounter
+                                * an error while loading the table, we need to keep trying to
+                                * load the table rather than aborting the table load. Set the
+                                * status to AE_OK to proceed with the table load.
+                                */
+                               if ((walk_state->
+                                    parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                                   && status == AE_ALREADY_EXISTS) {
+                                       status = AE_OK;
+                               }
                                if (status == AE_CTRL_PARSE_CONTINUE) {
                                        continue;
                                }
@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                            acpi_ps_next_parse_state(walk_state, op, status);
                        if (status == AE_CTRL_PENDING) {
                                status = AE_OK;
+                       } else
+                           if ((walk_state->
+                                parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                               && status != AE_CTRL_TRANSFER
+                               && ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+                                * loading a table by executing it as a control method.
+                                * However, if we encounter an error while loading the table,
+                                * we need to keep trying to load the table rather than
+                                * aborting the table load (setting the status to AE_OK
+                                * continues the table load). If we get a failure at this
+                                * point, it means that the dispatcher got an error while
+                                * processing Op (most likely an AML operand error) or a
+                                * control method was called from module level and the
+                                * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+                                * leave the status alone, there's nothing wrong with it.
+                                */
+                               status = AE_OK;
                        }
                }
 
index 5a64ddaed8a3782f94e278424368a7ce7167bfbb..e474302726926dd0c997c9432da0f9488af65011 100644 (file)
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
        switch (lookup_status) {
        case AE_ALREADY_EXISTS:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Failure creating";
                break;
 
        case AE_NOT_FOUND:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Could not resolve";
                break;
 
        default:
 
-               acpi_os_printf("\n" ACPI_MSG_ERROR);
+               acpi_os_printf(ACPI_MSG_ERROR);
                message = "Failure resolving";
                break;
        }
index b0113a5802a3c073f5787de456bc601f0f8c11cd..d79ad844c78fcee1e51cfa7cde066363e45f51ef 100644 (file)
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
                         */
                        pr_err("extension failed to load: %s", hook->name);
                        __battery_hook_unregister(hook, 0);
-                       return;
+                       goto end;
                }
        }
        pr_info("new extension: %s\n", hook->name);
+end:
        mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
 */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-       struct acpi_battery_hook *hook_node;
+       struct acpi_battery_hook *hook_node, *tmp;
 
        mutex_lock(&hook_mutex);
        INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
         * when a battery gets hotplugged or initialized
         * during the battery module initialization.
         */
-       list_for_each_entry(hook_node, &battery_hook_list, list) {
+       list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
                if (hook_node->add_battery(battery->bat)) {
                        /*
                         * The notification of the extensions has failed, to
                         * prevent further errors we will unload the extension.
                         */
-                       __battery_hook_unregister(hook_node, 0);
                        pr_err("error in extension, unloading: %s",
                                        hook_node->name);
+                       __battery_hook_unregister(hook_node, 0);
                }
        }
        mutex_unlock(&hook_mutex);
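
The switch to list_for_each_entry_safe() above matters because __battery_hook_unregister() unlinks the node the loop is standing on; the _safe variant caches the next entry in a scratch pointer before the body runs. The pattern in isolation, with invented types:

    #include <linux/list.h>

    struct hook {
            struct list_head list;
            int (*add)(void);
    };

    static void add_all(struct list_head *hooks)
    {
            struct hook *h, *tmp;

            /* tmp already points at the next node, so deleting h is safe. */
            list_for_each_entry_safe(h, tmp, hooks, list) {
                    if (h->add())
                            list_del(&h->list);
            }
    }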
index bb94cf0731feb92b89b78cec274668fe204ed1e4..917f77f4cb556b9f4a1061c9b10dea72f8a9d81f 100644 (file)
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
        }
 }
 
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+       {
+               .ident = "Thinkpad X1 Carbon 6th",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
+               },
+       },
+       { },
+};
+
 int __init acpi_ec_init(void)
 {
        int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
        if (result)
                return result;
 
+       /*
+        * Disable EC wakeup on the following systems to prevent periodic
+        * wakeup from the EC GPE.
+        */
+       if (dmi_check_system(acpi_ec_no_wakeup)) {
+               ec_no_wakeup = true;
+               pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+       }
+
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
        /*
index d15814e1727fad991bf8c24c2d7fa728ad1bfcad..7c479002e798bf92f3dc58263c3c2064182922bb 100644 (file)
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        const guid_t *guid;
        int rc, i;
 
+       if (cmd_rc)
+               *cmd_rc = -EINVAL;
        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                 * If we return an error (like elsewhere) then the caller wouldn't
                 * be able to rely upon the data returned to make the calculation.
                 */
+               if (cmd_rc)
+                       *cmd_rc = 0;
                return 0;
        }
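
Both hunks in this function follow the same defensive pattern: give the optional out-parameter a well-defined value (-EINVAL) on entry, then overwrite it on the paths that produce a real result, so no early return can hand the caller an uninitialized *cmd_rc. A hedged sketch of the idiom with illustrative names (not the nfit code):

	static int do_op(void *ctx, int *out_rc)
	{
		if (out_rc)
			*out_rc = -EINVAL;	/* defined even on early return */

		if (!ctx)
			return -ENXIO;		/* *out_rc is already sane */

		if (out_rc)
			*out_rc = 0;		/* success path sets the real value */
		return 0;
	}
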
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
                mutex_lock(&acpi_desc->init_mutex);
                rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-                               work_busy(&acpi_desc->dwork.work)
+                               acpi_desc->scrub_busy
                                && !acpi_desc->cancel ? "+\n" : "\n");
                mutex_unlock(&acpi_desc->init_mutex);
        }
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
        return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 1;
+       /* note: a non-zero tmo should only be passed from within the workqueue */
+       if (tmo)
+               acpi_desc->scrub_tmo = tmo;
+       queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+       __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 0;
+       acpi_desc->scrub_count++;
+       if (acpi_desc->scrub_count_state)
+               sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
        struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
        mutex_lock(&acpi_desc->init_mutex);
        query_rc = acpi_nfit_query_poison(acpi_desc);
        tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-       if (tmo) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-               acpi_desc->scrub_tmo = tmo;
-       } else {
-               acpi_desc->scrub_count++;
-               if (acpi_desc->scrub_count_state)
-                       sysfs_notify_dirent(acpi_desc->scrub_count_state);
-       }
+       if (tmo)
+               __sched_ars(acpi_desc, tmo);
+       else
+               notify_ars_done(acpi_desc);
        memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
        mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                        break;
                }
 
-       queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+       sched_ars(acpi_desc);
        return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
                }
        }
        if (scheduled) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+               sched_ars(acpi_desc);
                dev_dbg(dev, "ars_scan triggered\n");
        }
        mutex_unlock(&acpi_desc->init_mutex);
index 7d15856a739f9dc70cbb4e325d95395829cb6b63..a97ff42fe311bfa5041f54d67124aed4b85deb4d 100644 (file)
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
        unsigned int max_ars;
        unsigned int scrub_count;
        unsigned int scrub_mode;
+       unsigned int scrub_busy:1;
        unsigned int cancel:1;
        unsigned long dimm_cmd_force_en;
        unsigned long bus_cmd_force_en;
index 7ca41bf023c9f354cad85617f0cceab228640d65..8df9abfa947b0dca4719c674fd645d187ade242d 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/uaccess.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
 #include "internal.h"
 
 #define _COMPONENT             ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+                                             void *_res, void **return_value)
+{
+       struct acpi_mem_space_context **mem_ctx;
+       union acpi_operand_object *handler_obj;
+       union acpi_operand_object *region_obj2;
+       union acpi_operand_object *region_obj;
+       struct resource *res = _res;
+       acpi_status status;
+
+       region_obj = acpi_ns_get_attached_object(handle);
+       if (!region_obj)
+               return AE_OK;
+
+       handler_obj = region_obj->region.handler;
+       if (!handler_obj)
+               return AE_OK;
+
+       if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return AE_OK;
+
+       if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+               return AE_OK;
+
+       region_obj2 = acpi_ns_get_secondary_object(region_obj);
+       if (!region_obj2)
+               return AE_OK;
+
+       mem_ctx = (void *)&region_obj2->extra.region_context;
+
+       if (!(mem_ctx[0]->address >= res->start &&
+             mem_ctx[0]->address < res->end))
+               return AE_OK;
+
+       status = handler_obj->address_space.setup(region_obj,
+                                                 ACPI_REGION_DEACTIVATE,
+                                                 NULL, (void **)mem_ctx);
+       if (ACPI_SUCCESS(status))
+               region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+       return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on memory
+ * regions that may overlap with operation regions, primarily allowing them to
+ * safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped the next time
+ * they are accessed, so drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level)
+{
+       if (!(res->flags & IORESOURCE_MEM))
+               return AE_TYPE;
+
+       return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+                                  acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
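
Going only by the kernel-doc above, a driver that wants to remap a memory window as non-cached before AML touches it might use the new helper roughly like this; the handle, resource, and depth arguments here are illustrative assumptions, not code from this series:

	acpi_status status;

	/* res covers the MMIO window the driver is about to remap */
	status = acpi_release_memory(ACPI_HANDLE(dev), res, ACPI_UINT32_MAX);
	if (ACPI_FAILURE(status))
		dev_warn(dev, "could not release overlapping ACPI mappings\n");
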
 /*
  * Let drivers know whether the resource checks are effective
  */
index e5ea1974d1e3820db7e97f8437a7931de70784f8..d1e26cb599bfca340e076500b9e27ec2f3c0bc73 100644 (file)
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
        if (cpu_node) {
                cpu_node = acpi_find_processor_package_id(table, cpu_node,
                                                          level, flag);
-               /* Only the first level has a guaranteed id */
-               if (level == 0)
+               /*
+                * As per the specification, if the processor structure
+                * represents an actual processor, then the ACPI processor ID
+                * must be valid. For processor containers,
+                * ACPI_PPTT_ACPI_PROCESSOR_ID_VALID should be set if the UID
+                * is valid.
+                */
+               if (level == 0 ||
+                   cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
                        return cpu_node->acpi_processor_id;
                return ACPI_PTR_DIFF(cpu_node, table);
        }
index 2b16e7c8fff357645d3cec4069330b89985347bb..39b181d6bd0d8cf2cbcd9dde1cf89b373ecae6a4 100644 (file)
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
        tristate "Calxeda Highbank SATA support"
-       depends on HAS_DMA
        depends on ARCH_HIGHBANK || COMPILE_TEST
        help
          This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
        tristate "Marvell SATA support"
-       depends on HAS_DMA
        depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
                   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
        select GENERIC_PHY
index 738fb22978ddcd14ad1956c5119972f19b17d2a6..b2b9eba1d214765723165f1d3d7c1bda64720207 100644 (file)
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
        { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
        { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+       { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
        return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               /* Various Lenovo 50 series have LPM issues with older BIOSen */
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+                       },
+                       .driver_data = "20180406", /* 1.31 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+                       },
+                       .driver_data = "20180420", /* 1.28 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+                       },
+                       .driver_data = "20180315", /* 1.33 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+                       },
+                       /*
+                        * Note: date based on release notes; 2.35 has been
+                        * reported to be good, but I've been unable to get
+                        * hold of the reporter to confirm the DMI BIOS date.
+                        * TODO: fix this.
+                        */
+                       .driver_data = "20180310", /* 2.35 */
+               },
+               { }     /* terminate list */
+       };
+       const struct dmi_system_id *dmi = dmi_first_match(sysids);
+       int year, month, date;
+       char buf[9];
+
+       if (!dmi)
+               return false;
+
+       dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+       snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+       return strcmp(buf, dmi->driver_data) < 0;
+}
+
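
The final comparison works because a date rendered with "%04d%02d%02d" is a fixed-width string whose lexicographic order matches chronological order, so strcmp() doubles as a date comparison. For example, against the ThinkPad T450s entry above:

	char buf[9];

	snprintf(buf, sizeof(buf), "%04d%02d%02d", 2018, 3, 1);
	/* "20180301" < "20180315": the installed BIOS predates the fix */
	if (strcmp(buf, "20180315") < 0)
		/* quirk applies, LPM gets disabled */;
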
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)                       \
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        "quirky BIOS, skipping spindown on poweroff\n");
        }
 
+       if (ahci_broken_lpm(pdev)) {
+               pi.flags |= ATA_FLAG_NO_LPM;
+               dev_warn(&pdev->dev,
+                        "BIOS update required for Link Power Management support\n");
+       }
+
        if (ahci_broken_suspend(pdev)) {
                hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
                dev_warn(&pdev->dev,
index 0045dacd814b44ec21f87e4acceb07e69056f214..72d90b4c3aaefa4b9051d02383b55c9e3899072b 100644 (file)
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp, port_fbs;
index 965842a08743f38d08d4a4c58cc345ffc4c81f97..09620c2ffa0f72e1a696d10d3e4480818b101e51 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-       if (pmp < EM_MAX_SLOTS)
+       if (pmp < EM_MAX_SLOTS) {
+               pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
                emp = &pp->em_priv[pmp];
-       else
+       } else {
                return -EINVAL;
+       }
 
        /* mask off the activity bits if we are in sw_activity
         * mode, user should turn off sw_activity before setting
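
array_index_nospec() is the standard Spectre-v1 mitigation: even though pmp was just bounds-checked, a mispredicted branch could still use an out-of-range value speculatively, so the helper clamps the index on the speculative path as well. The general shape, on a hypothetical table (the zatm hunk further down applies the same pattern):

	#include <linux/nospec.h>

	if (idx >= TABLE_SIZE)
		return -EINVAL;
	/* force idx into [0, TABLE_SIZE) even under misprediction */
	idx = array_index_nospec(idx, TABLE_SIZE);
	val = table[idx];
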
index 27d15ed7fa3d03771f020cf064749f6f9fe38633..cc71c63df3819f8da0ff312fed83dc17d706136c 100644 (file)
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
            (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
                dev->horkage |= ATA_HORKAGE_NOLPM;
 
+       if (ap->flags & ATA_FLAG_NO_LPM)
+               dev->horkage |= ATA_HORKAGE_NOLPM;
+
        if (dev->horkage & ATA_HORKAGE_NOLPM) {
                ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
                dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
index d5412145d76d60c2cc1393f07315bff2431c28a0..01306c018398fa16583cab46bd1e51b9ccf86309 100644 (file)
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;
 
-                       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-                               qc = __ata_qc_from_tag(ap, i);
+                       ata_qc_for_each_raw(ap, qc, i) {
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        unsigned int tag;
        int nr = 0;
 
        /* count only non-internal commands */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               if (ata_tag_internal(tag))
-                       continue;
-               if (ata_qc_from_tag(ap, tag))
+       ata_qc_for_each(ap, qc, tag) {
+               if (qc)
                        nr++;
        }
 
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
                goto out_unlock;
 
        if (cnt == ap->fastdrain_cnt) {
+               struct ata_queued_cmd *qc;
                unsigned int tag;
 
                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each(ap, qc, tag) {
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
+       struct ata_queued_cmd *qc;
        int tag, nr_aborted = 0;
 
        WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
        ata_eh_set_pending(ap, 0);
 
        /* include internal tag in iteration */
-       for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_with_internal(ap, qc, tag) {
                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
                return;
 
        /* has LLDD analyzed already? */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
 
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        struct ata_device *dev;
        unsigned int all_err_mask = 0, eflags = 0;
        int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 
        all_err_mask |= ehc->i.err_mask;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link)
                        continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        const char *frozen, *desc;
        char tries_buf[6] = "";
        int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
        if (ehc->i.desc[0] != '\0')
                desc = ehc->i.desc;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link ||
                    ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
                  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
 #endif
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+       ata_qc_for_each_raw(ap, qc, tag) {
                struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
                char data_buf[20] = "";
                char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
  */
 void ata_eh_finish(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        int tag;
 
        /* retry or finish qcs */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
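
Every conversion in this file leans on the iterator helpers introduced by this series in include/linux/libata.h. Their definitions are not shown in this diff; judging from the call sites, they are thin wrappers over the old open-coded tag loop, roughly like the following sketch (an assumption, not a verbatim quote):

	/* walk every command slot on the port; qc may be NULL per tag */
	#define ata_qc_for_each(ap, qc, tag)				\
		for ((tag) = 0; (tag) < ATA_MAX_QUEUE &&		\
		     ({ (qc) = ata_qc_from_tag((ap), (tag)); 1; });	\
		     (tag)++)

ata_qc_for_each_raw() presumably uses __ata_qc_from_tag() instead (never NULL, hence no qc checks at those call sites), and ata_qc_for_each_with_internal() extends the range by one slot to cover the internal tag.
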
 
index 6a91d04351d9b64d20251febd070ad28bfb3eb38..aad1b01447de6924df4b1c1713d5fc402df112a0 100644 (file)
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
                 */
                goto invalid_param_len;
        }
-       if (block > dev->n_sectors)
-               goto out_of_range;
 
        all = cdb[14] & 0x1;
+       if (all) {
+               /*
+                * Ignore the block address (zone ID) as defined by ZBC.
+                */
+               block = 0;
+       } else if (block >= dev->n_sectors) {
+               /*
+                * Block must be a valid zone ID (a zone start LBA).
+                */
+               fp = 2;
+               goto invalid_fld;
+       }
 
        if (ata_ncq_enabled(qc->dev) &&
            ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
  invalid_fld:
        ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
        return 1;
- out_of_range:
-       /* "Logical Block Address out of range" */
-       ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-       return 1;
 invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
index b8d9cfc60374e08dbed9b2f646d51aa4fec0e025..4dc528bf8e85e3088fa55859d056613e8db73281 100644 (file)
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
        /* We let libATA core do actual (queue) tag allocation */
 
-       /* all non NCQ/queued commands should have tag#0 */
-       if (ata_tag_internal(tag)) {
-               DPRINTK("mapping internal cmds to tag#0\n");
-               return 0;
-       }
-
        if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
                DPRINTK("tag %d invalid : out of range\n", tag);
                return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
        /* Workaround for data length mismatch errata */
        if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each_with_internal(ap, qc, tag) {
                        if (qc && ata_is_atapi(qc->tf.protocol)) {
                                u32 hcontrol;
                                /* Set HControl[27] to clear error registers */
index 10ae11aa1926f3dca460e81879d3b24d8bc58a8b..72c9b922a77bc7793bb20ccd6432f249bcce45e1 100644 (file)
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
-       struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
-       sdev0 = ap->host->ports[0]->link.device[0].sdev;
-       sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /*
index ff81a576347e5154c10c997717548be69e81bbab..82532c299bb5964a429e81353b9c5f94d9bb5ed2 100644 (file)
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..2c288d1f42bba0fcdf31ccec72c069bfa60688b9 100644 (file)
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                if (copy_from_user(&info,
                                    &((struct zatm_pool_req __user *) arg)->info,
                                    sizeof(info))) return -EFAULT;
index b074f242a43594fc3d3a383a9dce5d126e8f3a78..704f442958103545aa89ad0e986130aa6ebc5b06 100644 (file)
@@ -8,10 +8,7 @@ obj-y                  := component.o core.o bus.o dd.o syscore.o \
                           topology.o container.o property.o cacheinfo.o \
                           devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
-obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-y                          += firmware_loader/
 obj-$(CONFIG_NUMA)     += node.o
index 36622b52e419db9573c5cfb31f03bf740324e961..df3e1a44707acc74010cf5ce6fab815c4f744896 100644 (file)
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
                        link->rpm_active = true;
                }
                pm_runtime_new_link(consumer);
+               /*
+                * If the link is being added by the consumer driver at probe
+                * time, balance the decrementation of the supplier's runtime PM
+                * usage counter after consumer probe in driver_probe_device().
+                */
+               if (consumer->links.status == DL_DEV_PROBING)
+                       pm_runtime_get_noresume(supplier);
        }
        get_device(supplier);
        link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
                        switch (consumer->links.status) {
                        case DL_DEV_PROBING:
                                /*
-                                * Balance the decrementation of the supplier's
-                                * runtime PM usage counter after consumer probe
-                                * in driver_probe_device().
+                                * Some callers expect the link creation during
+                                * consumer driver probe to resume the supplier
+                                * even without DL_FLAG_RPM_ACTIVE.
                                 */
                                if (flags & DL_FLAG_PM_RUNTIME)
-                                       pm_runtime_get_sync(supplier);
+                                       pm_runtime_resume(supplier);
 
                                link->status = DL_STATE_CONSUMER_PROBE;
                                break;
index 1435d7281c66e3d82e877fc98eb8b7ae7321ec19..6ebcd65d64b6dc64603ede7676a68529fcb74210 100644 (file)
@@ -434,14 +434,6 @@ re_probe:
                        goto probe_failed;
        }
 
-       /*
-        * Ensure devices are listed in devices_kset in correct order
-        * It's important to move Dev to the end of devices_kset before
-        * calling .probe, because it could be recursive and parent Dev
-        * should always go first
-        */
-       devices_kset_move_last(dev);
-
        if (dev->bus->probe) {
                ret = dev->bus->probe(dev);
                if (ret)
index 4925af5c4cf039e6cc07918967aa6995353e4bd8..9e8484189034b83efb218fb3eb4604a0b2cd8f6d 100644 (file)
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-                                unsigned int index)
+                                unsigned int index, bool power_on)
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
 
-       genpd_lock(pd);
-       ret = genpd_power_on(pd, 0);
-       genpd_unlock(pd);
+       if (power_on) {
+               genpd_lock(pd);
+               ret = genpd_power_on(pd, 0);
+               genpd_unlock(pd);
+       }
 
        if (ret)
                genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
                                       "#power-domain-cells") != 1)
                return 0;
 
-       return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+       return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
        }
 
        /* Try to attach the device to the PM domain at the specified index. */
-       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
        if (ret < 1) {
                device_unregister(genpd_dev);
                return ret ? ERR_PTR(ret) : NULL;
        }
 
-       pm_runtime_set_active(genpd_dev);
        pm_runtime_enable(genpd_dev);
+       genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
        return genpd_dev;
 }
@@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * power domain corresponding to a DT node's "required-opps" property.
  *
  * @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
  *     the device node itself (if it doesn't have an OPP table) or a node
  *     within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
  *
  * Returns performance state corresponding to the "required-opps" property of
  * a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * Returns performance state on success and 0 on failure.
  */
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                                              struct device_node *opp_node)
+                                              struct device_node *np)
 {
        struct generic_pm_domain *genpd;
        struct dev_pm_opp *opp;
@@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
 
        genpd_lock(genpd);
 
-       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
        if (IS_ERR(opp)) {
                dev_err(dev, "Failed to find required OPP: %ld\n",
                        PTR_ERR(opp));
index a47e4987ee467ed04578b47499c29def872525f6..d146fedc38bb26535e3960963058a9635e5d7f7b 100644 (file)
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
        _drbd_start_io_acct(device, req);
 
        /* process discards always from our submitter thread */
-       if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-           (bio_op(bio) & REQ_OP_DISCARD))
+       if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+           bio_op(bio) == REQ_OP_DISCARD)
                goto queue_for_submitter_thread;
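
The change from & to == fixes a genuine predicate bug: REQ_OP_WRITE_ZEROES and REQ_OP_DISCARD are enum values (small integers), not single-bit flags, so the bitwise AND matched unrelated operations whenever the values shared bits. Assuming the operation codes of this era (REQ_OP_WRITE == 1, REQ_OP_DISCARD == 3):

	bio_op(bio) & REQ_OP_DISCARD	/* 1 & 3 == 1: true for a plain write! */
	bio_op(bio) == REQ_OP_DISCARD	/* true only for an actual discard */
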
 
        if (rw == WRITE && req->private_bio && req->i.size
index 1476cb3439f46e53a8f42a9397fb6b19afd8ff95..5e793dd7adfbd096239f4d0994d2f20e24b2b596 100644 (file)
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
                what = COMPLETED_OK;
        }
 
-       bio_put(req->private_bio);
        req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+       bio_put(bio);
 
        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&device->resource->req_lock, flags);
index d6b6f434fd4bb7652faf597ef9ab6c7b6dd7c362..4cb1d1be3cfbc9c14a6129ecdc2de5368ac668c4 100644 (file)
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
                arg = (unsigned long) compat_ptr(arg);
        case LOOP_SET_FD:
        case LOOP_CHANGE_FD:
+       case LOOP_SET_BLOCK_SIZE:
                err = lo_ioctl(bdev, mode, cmd, arg);
                break;
        default:
index 3b7083b8ecbb3b0ffcad0d2879954780075333ee..3fb95c8d9fd83567496d77e1e4ade83975658401 100644 (file)
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF             4
 #define NBD_BOUND                      5
 #define NBD_DESTROY_ON_DISCONNECT      6
+#define NBD_DISCONNECT_ON_CLOSE        7
 
 struct nbd_config {
        u32 flags;
@@ -111,12 +112,16 @@ struct nbd_device {
        struct task_struct *task_setup;
 };
 
+#define NBD_CMD_REQUEUED       1
+
 struct nbd_cmd {
        struct nbd_device *nbd;
+       struct mutex lock;
        int index;
        int cookie;
-       struct completion send_complete;
        blk_status_t status;
+       unsigned long flags;
+       u32 cmd_cookie;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -138,12 +143,42 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
        return disk_to_dev(nbd->disk);
 }
 
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+
+       if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+               blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+       u32 tag = blk_mq_unique_tag(req);
+       u64 cookie = cmd->cmd_cookie;
+
+       return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+       return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+       return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
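
With the helpers above, the wire handle is a 64-bit value packing a per-command cookie above the blk-mq tag; a reply that arrives after a timeout-driven requeue carries the old cookie and can be rejected as stale even though the tag matches. A worked round trip under the definitions just added:

	/* cmd->cmd_cookie == 5, blk_mq_unique_tag(req) == 0x00020003 */
	u64 handle = ((u64)5 << NBD_COOKIE_BITS) | 0x00020003;
					/* handle == 0x0000000500020003 */
	u32 tag    = nbd_handle_to_tag(handle);		/* 0x00020003 */
	u32 cookie = nbd_handle_to_cookie(handle);	/* 5 */
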
 static const char *nbdcmd_to_ascii(int cmd)
 {
        switch (cmd) {
@@ -317,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        config = nbd->config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -341,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                        nbd_mark_nsock_dead(nbd, nsock, 1);
                                mutex_unlock(&nsock->tx_lock);
                        }
-                       blk_mq_requeue_request(req, true);
+                       mutex_unlock(&cmd->lock);
+                       nbd_requeue_cmd(cmd);
                        nbd_config_put(nbd);
                        return BLK_EH_DONE;
                }
@@ -351,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        set_bit(NBD_TIMEDOUT, &config->runtime_flags);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
        sock_shutdown(nbd);
        nbd_config_put(nbd);
 done:
@@ -428,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        struct iov_iter from;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
+       u64 handle;
        u32 type;
        u32 nbd_cmd_flags = 0;
-       u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;
 
        iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -472,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                        goto send_pages;
                }
                iov_iter_advance(&from, sent);
+       } else {
+               cmd->cmd_cookie++;
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
@@ -480,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
-       memcpy(request.handle, &tag, sizeof(tag));
+       handle = nbd_cmd_handle(cmd);
+       memcpy(request.handle, &handle, sizeof(handle));
 
        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(type),
@@ -498,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
+                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                        return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -539,6 +583,7 @@ send_pages:
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
+                                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                                        return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
@@ -571,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
+       u64 handle;
        u16 hwq;
        u32 tag;
        struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
        struct iov_iter to;
+       int ret = 0;
 
        reply.magic = 0;
        iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -592,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-EPROTO);
        }
 
-       memcpy(&tag, reply.handle, sizeof(u32));
-
+       memcpy(&handle, reply.handle, sizeof(handle));
+       tag = nbd_handle_to_tag(handle);
        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -604,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
+
+       mutex_lock(&cmd->lock);
+       if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+               dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+                       req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+               ret = -ENOENT;
+               goto out;
+       }
+       if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+               dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                cmd->status = BLK_STS_IOERR;
-               return cmd;
+               goto out;
        }
 
        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -633,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                                if (nbd_disconnected(config) ||
                                    config->num_connections <= 1) {
                                        cmd->status = BLK_STS_IOERR;
-                                       return cmd;
+                                       goto out;
                                }
-                               return ERR_PTR(-EIO);
+                               ret = -EIO;
+                               goto out;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
-       } else {
-               /* See the comment in nbd_queue_rq. */
-               wait_for_completion(&cmd->send_complete);
        }
-       return cmd;
+out:
+       mutex_unlock(&cmd->lock);
+       return ret ? ERR_PTR(ret) : cmd;
 }
 
 static void recv_work(struct work_struct *work)
@@ -803,7 +864,7 @@ again:
         */
        blk_mq_start_request(req);
        if (unlikely(nsock->pending && nsock->pending != req)) {
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
                goto out;
        }
@@ -816,7 +877,7 @@ again:
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed, requeueing\n");
                nbd_mark_nsock_dead(nbd, nsock, 1);
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
        }
 out:
@@ -840,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
-       init_completion(&cmd->send_complete);
+       mutex_lock(&cmd->lock);
+       clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
 
        /* We can be called directly from the user space process, which means we
         * could possibly have signals pending so our sendmsg will fail.  In
@@ -852,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                ret = BLK_STS_IOERR;
        else if (!ret)
                ret = BLK_STS_OK;
-       complete(&cmd->send_complete);
+       mutex_unlock(&cmd->lock);
 
        return ret;
 }
@@ -1305,6 +1367,12 @@ out:
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
        struct nbd_device *nbd = disk->private_data;
+       struct block_device *bdev = bdget_disk(disk, 0);
+
+       if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+                       bdev->bd_openers == 0)
+               nbd_disconnect_and_put(nbd);
+
        nbd_config_put(nbd);
        nbd_put(nbd);
 }
@@ -1452,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
        cmd->nbd = set->driver_data;
+       cmd->flags = 0;
+       mutex_init(&cmd->lock);
        return 0;
 }
 
@@ -1705,6 +1775,10 @@ again:
                                &config->runtime_flags);
                        put_dev = true;
                }
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                               &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1823,17 @@ out:
        return ret;
 }
 
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+       mutex_lock(&nbd->config_lock);
+       nbd_disconnect(nbd);
+       nbd_clear_sock(nbd);
+       mutex_unlock(&nbd->config_lock);
+       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
+               nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct nbd_device *nbd;
@@ -1781,13 +1866,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                nbd_put(nbd);
                return 0;
        }
-       mutex_lock(&nbd->config_lock);
-       nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
-       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-                              &nbd->config->runtime_flags))
-               nbd_config_put(nbd);
+       nbd_disconnect_and_put(nbd);
        nbd_config_put(nbd);
        nbd_put(nbd);
        return 0;
@@ -1798,7 +1877,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        struct nbd_device *nbd = NULL;
        struct nbd_config *config;
        int index;
-       int ret = -EINVAL;
+       int ret = 0;
        bool put_dev = false;
 
        if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1917,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
            !nbd->task_recv) {
                dev_err(nbd_to_dev(nbd),
                        "not configured, cannot reconfigure\n");
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1862,6 +1942,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                                               &config->runtime_flags))
                                refcount_inc(&nbd->refs);
                }
+
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               } else {
+                       clear_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
index 7948049f6c4321b02e1611383dae1be86a7748f1..042c778e5a4e0bf2009c38a6b1cf37bc5d23ce89 100644 (file)
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
        pr_info("null: rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+       __blk_complete_request(rq);
        return BLK_EH_DONE;
 }
 
index fa0729c1e776e21834f02caa2d42ac7867c5ad36..d81c653b9bf61587db1684305dc296a85d05818a 100644 (file)
@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
 {
        unsigned int counter;
 
-       counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+       counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;
 
index 7436b2d27fa38513602207c6b4bc9213c97af436..a390c6d4f72df976e0880150207fef26acc3db97 100644 (file)
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
        zram->backing_dev = NULL;
        zram->old_block_size = 0;
        zram->bdev = NULL;
-
+       zram->disk->queue->backing_dev_info->capabilities |=
+                               BDI_CAP_SYNCHRONOUS_IO;
        kvfree(zram->bitmap);
        zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
        zram->backing_dev = backing_dev;
        zram->bitmap = bitmap;
        zram->nr_pages = nr_pages;
+       /*
+        * With the writeback feature, zram does asynchronous IO, so it is no
+        * longer a synchronous device; drop the synchronous IO flag. Otherwise,
+        * an upper layer (e.g., swap) could wait for IO completion rather than
+        * submitting and returning, which would make the system sluggish.
+        * Furthermore, when the IO function returns (e.g., swap_readpage), the
+        * upper layer assumes the IO is done and may free the page while the
+        * IO is actually still in flight, eventually causing a use-after-free
+        * once that IO really completes.
+        */
+       zram->disk->queue->backing_dev_info->capabilities &=
+                       ~BDI_CAP_SYNCHRONOUS_IO;
        up_write(&zram->init_lock);
 
        pr_info("setup backing device %s\n", file_name);
index 14d159e2042d5c488c1e23b3247508aab0a2ebff..2dc33e65d2d0c957199f1e3c1bf8028d4e09ca88 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
index 1cc29629d23807b83bfee9f7d23fec1d5757941d..80d60f43db56123076fed5ea2047da35af88af8f 100644 (file)
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
        const char *name;
        int nr_fck = 0, nr_ick = 0, i, error = 0;
 
-       ddata->clock_roles = devm_kzalloc(ddata->dev,
-                                         sizeof(*ddata->clock_roles) *
+       ddata->clock_roles = devm_kcalloc(ddata->dev,
                                          SYSC_MAX_CLOCKS,
+                                         sizeof(*ddata->clock_roles),
                                          GFP_KERNEL);
        if (!ddata->clock_roles)
                return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
                return -EINVAL;
        }
 
-       ddata->clocks = devm_kzalloc(ddata->dev,
-                                    sizeof(*ddata->clocks) * ddata->nr_clocks,
+       ddata->clocks = devm_kcalloc(ddata->dev,
+                                    ddata->nr_clocks, sizeof(*ddata->clocks),
                                     GFP_KERNEL);
        if (!ddata->clocks)
                return -ENOMEM;
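
Both allocations move from an open-coded size multiplication to devm_kcalloc(n, size, flags), which returns NULL instead of allocating a short buffer when n * size would overflow. The transformation in isolation, with an illustrative buffer:

	/* before: an overflowing multiplication goes unnoticed */
	buf = devm_kzalloc(dev, sizeof(*buf) * n, GFP_KERNEL);

	/* after: devm_kcalloc() fails cleanly on multiplication overflow */
	buf = devm_kcalloc(dev, n, sizeof(*buf), GFP_KERNEL);
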
index 53fe633df1e8d9c1187e862b6a305a1905bbc2bb..c9bf2c219841846570c6cffefe8e2e4c59583997 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
        alpha_agp_info *agp = agp_bridge->dev_private_data;
        dma_addr_t dma_addr;
index e50c29c97ca74d20542a176387d3de8ee4780b79..c69e39fdd02b8c5c9a35931271c45383f4b18da3 100644 (file)
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
 
        /* Address to map to */
        pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
-       aperturebase = tmp << 25;
+       aperturebase = (u64)tmp << 25;
        aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
 
        enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
        pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
        nb_order = (nb_order >> 1) & 7;
        pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
-       nb_aper = nb_base << 25;
+       nb_aper = (u64)nb_base << 25;
 
        /* Northbridge seems to contain crap. Try the AGP bridge. */
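
Both casts fix the same 32-bit truncation: tmp and nb_base are u32, so the shift by 25 is performed in 32-bit arithmetic and the high bits are lost before the result is widened into the u64 destination. A worked illustration:

	u32 tmp = 0x100;		/* aperture base field from config space */
	u64 wrong = tmp << 25;		/* shift wraps in 32 bits: result is 0 */
	u64 right = (u64)tmp << 25;	/* 0x200000000: shift done in 64 bits */
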
 
index 91bb98c42a1ca76376ae0db4b43fd7a89fca27b0..aaf9e5afaad435e2342a15fc963aa91367079957 100644 (file)
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+       int err;
+
        mutex_lock(&rng_mutex);
 
        list_del(&rng->list);
-       if (current_rng == rng)
-               enable_best_rng();
+       if (current_rng == rng) {
+               err = enable_best_rng();
+               if (err) {
+                       drop_current_rng();
+                       cur_rng_set_by_user = 0;
+               }
+       }
 
        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
index ad353be871bf005c6c0ca875d663bed49906c302..90ec010bffbd9776c012586b4e01b24cdd0bd2d6 100644 (file)
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
        return 0;
 
 out_err:
-       ipmi_unregister_smi(new_smi->intf);
-       new_smi->intf = NULL;
+       if (new_smi->intf) {
+               ipmi_unregister_smi(new_smi->intf);
+               new_smi->intf = NULL;
+       }
 
        kfree(init_name);
 
index fbfc05e3f3d1756a58455dbdbf73c90162898f4c..bb882ab161fe1bbb4b678cc9bf105b77273296e3 100644 (file)
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
 int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
 {
        unsigned long flags;
-       int ret = 0;
+       int ret = -ENODATA;
        u8 status;
 
        spin_lock_irqsave(&kcs_bmc->lock, flags);
 
-       if (!kcs_bmc->running) {
-               kcs_force_abort(kcs_bmc);
-               ret = -ENODEV;
-               goto out_unlock;
-       }
-
-       status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
-       switch (status) {
-       case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
-               kcs_bmc_handle_cmd(kcs_bmc);
-               break;
-
-       case KCS_STATUS_IBF:
-               kcs_bmc_handle_data(kcs_bmc);
-               break;
+       status = read_status(kcs_bmc);
+       if (status & KCS_STATUS_IBF) {
+               if (!kcs_bmc->running)
+                       kcs_force_abort(kcs_bmc);
+               else if (status & KCS_STATUS_CMD_DAT)
+                       kcs_bmc_handle_cmd(kcs_bmc);
+               else
+                       kcs_bmc_handle_data(kcs_bmc);
 
-       default:
-               ret = -ENODATA;
-               break;
+               ret = 0;
        }
 
-out_unlock:
        spin_unlock_irqrestore(&kcs_bmc->lock, flags);
 
        return ret;
index ffeb60d3434c5150650d1ef4c9a3aec3d8ffd59e..df66a9dd0aae3c5ef27a4b92410d347eb8f82a3a 100644 (file)
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 #endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
+       vma_set_anonymous(vma);
        return 0;
 }
 
index a8fb0020ba5ccfb9f4b72b689544299815fab60a..bd449ad524423c92584489b49496dfede65c52ae 100644 (file)
@@ -402,7 +402,8 @@ static struct poolinfo {
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
 
                /* should we wake readers? */
                if (entropy_bits >= random_read_wakeup_bits &&
-                   wq_has_sleeper(&random_wait)) {
-                       wake_up_interruptible_poll(&random_wait, POLLIN);
+                   wq_has_sleeper(&random_read_wait)) {
+                       wake_up_interruptible(&random_read_wait);
                        kill_fasync(&fasync, SIGIO, POLL_IN);
                }
                /* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ retry:
        trace_debit_entropy(r->name, 8 * ibytes);
        if (ibytes &&
            (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-               wake_up_interruptible_poll(&random_wait, POLLOUT);
+               wake_up_interruptible(&random_write_wait);
                kill_fasync(&fasync, SIGIO, POLL_OUT);
        }
 
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
                if (nonblock)
                        return -EAGAIN;
 
-               wait_event_interruptible(random_wait,
+               wait_event_interruptible(random_read_wait,
                        ENTROPY_BITS(&input_pool) >=
                        random_read_wakeup_bits);
                if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
        return ret;
 }
 
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
-       return &random_wait;
-}
-
 static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
 {
-       __poll_t mask = 0;
+       __poll_t mask;
 
+       poll_wait(file, &random_read_wait, wait);
+       poll_wait(file, &random_write_wait, wait);
+       mask = 0;
        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
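
This hunk moves /dev/random back from the short-lived get_poll_head/poll_mask pair to the classic ->poll contract: register on every wait queue of interest with poll_wait(), then report the currently-ready events. The shape of the idiom, with hypothetical queue and helper names:

	static __poll_t my_poll(struct file *file, poll_table *wait)
	{
		__poll_t mask = 0;

		/* arm both queues; the core re-evaluates after any wakeup */
		poll_wait(file, &read_wait, wait);
		poll_wait(file, &write_wait, wait);

		if (data_available())
			mask |= EPOLLIN | EPOLLRDNORM;
		if (space_available())
			mask |= EPOLLOUT | EPOLLWRNORM;
		return mask;
	}
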
@@ -1897,14 +1895,22 @@ static int
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
        size_t bytes;
-       __u32 buf[16];
+       __u32 t, buf[16];
        const char __user *p = buffer;
 
        while (count > 0) {
+               int b, i = 0;
+
                bytes = min(count, sizeof(buf));
                if (copy_from_user(&buf, p, bytes))
                        return -EFAULT;
 
+               for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+                       if (!arch_get_random_int(&t))
+                               break;
+                       buf[i] ^= t;
+               }
+
                count -= bytes;
                p += bytes;
 
@@ -1992,8 +1998,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
-       .get_poll_head  = random_get_poll_head,
-       .poll_mask  = random_poll_mask,
+       .poll  = random_poll,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
@@ -2326,7 +2331,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
         * We'll be woken up again once below random_write_wakeup_thresh,
         * or when the calling thread is about to terminate.
         */
-       wait_event_interruptible(random_wait, kthread_should_stop() ||
+       wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
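
The drivers/char/random.c changes above split the single random_wait queue into separate read and write queues and move from the short-lived ->get_poll_head/->poll_mask pair back to a classic ->poll method. A minimal sketch of the restored idiom, with illustrative names for the queues and readiness predicates (not the driver's exact code):

	static DECLARE_WAIT_QUEUE_HEAD(example_read_wait);
	static DECLARE_WAIT_QUEUE_HEAD(example_write_wait);

	static __poll_t example_poll(struct file *file, poll_table *wait)
	{
		__poll_t mask = 0;

		/*
		 * poll_wait() never blocks; it only registers the caller
		 * on each queue so that a wake_up on either one causes
		 * ->poll to be re-evaluated.
		 */
		poll_wait(file, &example_read_wait, wait);
		poll_wait(file, &example_write_wait, wait);

		if (example_readable())		/* hypothetical predicate */
			mask |= EPOLLIN | EPOLLRDNORM;
		if (example_writable())		/* hypothetical predicate */
			mask |= EPOLLOUT | EPOLLWRNORM;

		return mask;
	}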
index ae40cbe770f059d0ca1aded1dc7fc3530b3153df..0bb25dd009d18467c7ae6f5238e1a88c24844c7b 100644 (file)
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD)                       += sprd/
 obj-$(CONFIG_ARCH_STI)                 += st/
 obj-$(CONFIG_ARCH_STRATIX10)           += socfpga/
 obj-$(CONFIG_ARCH_SUNXI)               += sunxi/
-obj-$(CONFIG_ARCH_SUNXI)               += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU)                        += sunxi-ng/
 obj-$(CONFIG_ARCH_TEGRA)               += tegra/
 obj-y                                  += ti/
 obj-$(CONFIG_CLK_UNIPHIER)             += uniphier/
index 38b366b00c571eda38e7faf7febc751bbeb212ad..7b70a074095df9e3871677e51981928248a7192a 100644 (file)
@@ -24,7 +24,7 @@
 #define ASPEED_MPLL_PARAM      0x20
 #define ASPEED_HPLL_PARAM      0x24
 #define  AST2500_HPLL_BYPASS_EN        BIT(20)
-#define  AST2400_HPLL_STRAPPED BIT(18)
+#define  AST2400_HPLL_PROGRAMMED BIT(18)
 #define  AST2400_HPLL_BYPASS_EN        BIT(17)
 #define ASPEED_MISC_CTRL       0x2c
 #define  UART_DIV13_EN         BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
        [ASPEED_CLK_GATE_GCLK] =        {  1,  7, "gclk-gate",          NULL,   0 }, /* 2D engine */
        [ASPEED_CLK_GATE_MCLK] =        {  2, -1, "mclk-gate",          "mpll", CLK_IS_CRITICAL }, /* SDRAM */
        [ASPEED_CLK_GATE_VCLK] =        {  3,  6, "vclk-gate",          NULL,   0 }, /* Video Capture */
-       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", 0 }, /* PCIe/PCI */
-       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   0 }, /* DAC */
+       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   CLK_IS_CRITICAL }, /* DAC */
        [ASPEED_CLK_GATE_REFCLK] =      {  6, -1, "refclk-gate",        "clkin", CLK_IS_CRITICAL },
        [ASPEED_CLK_GATE_USBPORT2CLK] = {  7,  3, "usb-port2-gate",     NULL,   0 }, /* USB2.0 Host port 2 */
        [ASPEED_CLK_GATE_LCLK] =        {  8,  5, "lclk-gate",          NULL,   0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
 {
        struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
        u32 clk = BIT(gate->clock_idx);
+       u32 rst = BIT(gate->reset_idx);
        u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
        u32 reg;
 
+       /*
+        * If the IP is in reset, treat the clock as not enabled;
+        * this happens with some clocks such as the USB one when
+        * coming from cold reset. Without this, aspeed_clk_enable()
+        * will fail to lift the reset.
+        */
+       if (gate->reset_idx >= 0) {
+               regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+               if (reg & rst)
+                       return 0;
+       }
+
        regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
 
        return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
 static void __init aspeed_ast2400_cc(struct regmap *map)
 {
        struct clk_hw *hw;
-       u32 val, freq, div;
+       u32 val, div, clkin, hpll;
+       const u16 hpll_rates[][4] = {
+               {384, 360, 336, 408},
+               {400, 375, 350, 425},
+       };
+       int rate;
 
        /*
         * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
         * strapping
         */
        regmap_read(map, ASPEED_STRAP, &val);
-       if (val & CLKIN_25MHZ_EN)
-               freq = 25000000;
-       else if (val & AST2400_CLK_SOURCE_SEL)
-               freq = 48000000;
-       else
-               freq = 24000000;
-       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-       pr_debug("clkin @%u MHz\n", freq / 1000000);
+       rate = (val >> 8) & 3;
+       if (val & CLKIN_25MHZ_EN) {
+               clkin = 25000000;
+               hpll = hpll_rates[1][rate];
+       } else if (val & AST2400_CLK_SOURCE_SEL) {
+               clkin = 48000000;
+               hpll = hpll_rates[0][rate];
+       } else {
+               clkin = 24000000;
+               hpll = hpll_rates[0][rate];
+       }
+       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+       pr_debug("clkin @%u MHz\n", clkin / 1000000);
 
        /*
         * High-speed PLL clock derived from the crystal. This is the CPU clock,
-        * and we assume that it is enabled
+        * and we assume that it is enabled. It can be configured through the
+        * HPLL_PARAM register, or set to a specified frequency by strapping.
         */
        regmap_read(map, ASPEED_HPLL_PARAM, &val);
-       WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+       if (val & AST2400_HPLL_PROGRAMMED)
+               hw = aspeed_ast2400_calc_pll("hpll", val);
+       else
+               hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+                               hpll * 1000000);
+
+       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
 
        /*
         * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
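
In the AST2400 hunk above, strap bits [9:8] (rate = (val >> 8) & 3) select one of four strapped HPLL rates, and CLKIN_25MHZ_EN picks the table row. A condensed restatement of that decoding, as a sketch only (the function name is illustrative; the macro and table values are as in the diff):

	static unsigned int ast2400_strapped_hpll_mhz(u32 strap)
	{
		static const u16 rates[][4] = {
			{384, 360, 336, 408},	/* 24 or 48 MHz CLKIN */
			{400, 375, 350, 425},	/* 25 MHz CLKIN */
		};
		int row = (strap & CLKIN_25MHZ_EN) ? 1 : 0;

		/* strap bits [9:8] index within the selected row */
		return rates[row][(strap >> 8) & 3];
	}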
index 9760b526ca31da90c4c288ac096cb0fee7d16cbf..e2ed078abd90c7b0e775a4e8e510ab46151c82ef 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
-#include <linux/stringify.h>
 
 #include "clk.h"
 
@@ -2559,7 +2558,7 @@ static const struct {
        unsigned long flag;
        const char *name;
 } clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
        ENTRY(CLK_SET_RATE_GATE),
        ENTRY(CLK_SET_PARENT_GATE),
        ENTRY(CLK_SET_RATE_PARENT),
index 7513411140b693ecdb1d4c825de69e0e9e679d15..9ab3db8b3988375d02b88c5e76f0843a9a6c475f 100644 (file)
@@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
        struct clk *clk;
        int rc;
 
-       if (index < 0)
-               return ERR_PTR(-EINVAL);
-
        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);
        if (rc)
@@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct clk *clk;
 
-       if (dev) {
+       if (dev && dev->of_node) {
                clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
                if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;
index aae62a5b8734e859e76a16f995b05f43bf5f9b4a..d1bbee19ed0fcf74edfb2019fc4907ba00533a66 100644 (file)
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
 
        usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
        if (IS_ERR(usb1)) {
-               if (PTR_ERR(usb0) == -EPROBE_DEFER)
+               if (PTR_ERR(usb1) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
 
                dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
index 6a42529d31a91aa644d64c8708b45297ec361efe..cc5614567a70d61cf76aa6777caf9c2f39479c5d 100644 (file)
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
 #ifdef CONFIG_ARCH_DAVINCI_DM355
 extern const struct davinci_psc_init_data dm355_psc_init_data;
 #endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
 extern const struct davinci_psc_init_data dm365_psc_init_data;
 #endif
 #ifdef CONFIG_ARCH_DAVINCI_DM644x
index 58f546e048073160e40bedf637894b189072b143..e4cf96ba704ed98ff2a277af75da8c7a3a5b8ee0 100644 (file)
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
        struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
        unsigned long divider;
 
-       divider = meson_parm_read(clk->map, &adiv->div);
+       divider = meson_parm_read(clk->map, &adiv->div) + 1;
 
        return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
 }
index 240658404367f38670b79a2d40fcd1cdba92c67b..177fffb9ebefe4c5d059e149a912f3c14ecc3ea2 100644 (file)
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div2_div" },
                .num_parents = 1,
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index 6860bd5a37c5e50e9be9e26c40981dea33d57dfc..44e4e27eddada1dd96a3a3d9ae414bd711a89d4d 100644 (file)
@@ -35,6 +35,7 @@
 #define CLK_SEL                0x10
 #define CLK_DIS                0x14
 
+#define  ARMADA_37XX_DVFS_LOAD_1 1
 #define LOAD_LEVEL_NR  4
 
 #define ARMADA_37XX_NB_L0L1    0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
        return -EINVAL;
 }
 
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+       unsigned int cur_level;
+
+       if (rate != 1200 * 1000 * 1000)
+               return;
+
+       regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+       cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+               return;
+
+       regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+                          ARMADA_37XX_NB_CPU_LOAD_MASK,
+                          ARMADA_37XX_DVFS_LOAD_1);
+       msleep(20);
+}
+
 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                         */
                        reg = ARMADA_37XX_NB_CPU_LOAD;
                        mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+                       clk_pm_cpu_set_rate_wa(rate, base);
+
                        regmap_update_bits(base, reg, mask, load_level);
 
                        return rate;
index 9f35b3fe1d9731f78fcd095cc544274a3c1c872a..ff8d66fd94e6198ac7abe91ce4f8e4dcc071897c 100644 (file)
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
 
 static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
        .halt_reg = 0x75018,
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x75018,
                .enable_mask = BIT(0),
index 1a25ee4f3658672bc8f4a4bc8c55c0492289ea61..4b20d1b67a1b77b0f6c735213c43e28a4dfa6dee 100644 (file)
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
                .name = "mmagic_bimc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = ALWAYS_ON,
 };
 
 static struct gdsc mmagic_video_gdsc = {
index acaa14cfa25ca3922178e865ddb4daefc676ee2a..49454700f2e5c2e469cbdb037c54900f8c74cf79 100644 (file)
@@ -1,24 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0
 # Common objects
-lib-$(CONFIG_SUNXI_CCU)                += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_reset.o
+obj-y                          += ccu_common.o
+obj-y                          += ccu_mmc_timing.o
+obj-y                          += ccu_reset.o
 
 # Base clock types
-lib-$(CONFIG_SUNXI_CCU)                += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_sdm.o
+obj-y                          += ccu_div.o
+obj-y                          += ccu_frac.o
+obj-y                          += ccu_gate.o
+obj-y                          += ccu_mux.o
+obj-y                          += ccu_mult.o
+obj-y                          += ccu_phase.o
+obj-y                          += ccu_sdm.o
 
 # Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mp.o
+obj-y                          += ccu_nk.o
+obj-y                          += ccu_nkm.o
+obj-y                          += ccu_nkmp.o
+obj-y                          += ccu_nm.o
+obj-y                          += ccu_mp.o
 
 # SoC support
 obj-$(CONFIG_SUN50I_A64_CCU)   += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU)  += ccu-sun8i-r40.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-de.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU)                += lib.a
index 00caf37e52f9c6cad3913c2c0b18bd32aa20756a..c070cc7992e9c84d45216565e7f399d54f12cfcd 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)      += samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)    += fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)     += vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)      += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER)                += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER)                += timer-mediatek.o
 obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
 obj-$(CONFIG_CLKSRC_TI_32K)    += timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)       += timer-nps.o
index 57cb2f00fc07ce7f5ffb526bd9bb03ed11287626..d8c7f5750cdb025dfd3eae42d691318fc472e29b 100644 (file)
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
-               clk->cpumask = cpu_all_mask;
+               clk->cpumask = cpu_possible_mask;
                if (arch_timer_mem_use_virtual) {
                        clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644 (file)
index f9b724f..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG         0x00
-#define GPT_IRQ_ENABLE(val)    BIT((val) - 1)
-#define GPT_IRQ_ACK_REG                0x08
-#define GPT_IRQ_ACK(val)       BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val)    (0x10 * (val))
-#define TIMER_CTRL_OP(val)     (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT  (0)
-#define TIMER_CTRL_OP_REPEAT   (1)
-#define TIMER_CTRL_OP_FREERUN  (3)
-#define TIMER_CTRL_CLEAR       (2)
-#define TIMER_CTRL_ENABLE      (1)
-#define TIMER_CTRL_DISABLE     (0)
-
-#define TIMER_CLK_REG(val)     (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val)     (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M   (0)
-#define TIMER_CLK_SRC_RTC32K   (1)
-#define TIMER_CLK_DIV1         (0x0)
-#define TIMER_CLK_DIV2         (0x1)
-
-#define TIMER_CNT_REG(val)     (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val)     (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT    1
-#define GPT_CLK_SRC    2
-
-struct mtk_clock_event_device {
-       void __iomem *gpt_base;
-       u32 ticks_per_jiffy;
-       struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
-       return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
-                               struct clock_event_device *c)
-{
-       return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
-       u32 val;
-
-       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-       writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
-                       TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
-                               unsigned long delay, u8 timer)
-{
-       writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
-               bool periodic, u8 timer)
-{
-       u32 val;
-
-       /* Acknowledge interrupt */
-       writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
-       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
-       /* Clear 2 bit timer operation mode field */
-       val &= ~TIMER_CTRL_OP(0x3);
-
-       if (periodic)
-               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
-       else
-               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
-       writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
-              evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
-       mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
-       return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
-       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
-       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-       mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
-       mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
-       return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
-                                  struct clock_event_device *clk)
-{
-       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
-       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-       mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
-       mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
-       return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
-       struct mtk_clock_event_device *evt = dev_id;
-
-       /* Acknowledge timer0 irq */
-       writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
-       evt->dev.event_handler(&evt->dev);
-
-       return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
-       writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
-               evt->gpt_base + TIMER_CTRL_REG(timer));
-
-       writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
-                       evt->gpt_base + TIMER_CLK_REG(timer));
-
-       writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
-       writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
-                       evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
-       u32 val;
-
-       /* Disable all interrupts */
-       writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
-       /* Acknowledge all spurious pending interrupts */
-       writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
-       val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
-       writel(val | GPT_IRQ_ENABLE(timer),
-                       evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
-       struct mtk_clock_event_device *evt;
-       struct resource res;
-       unsigned long rate = 0;
-       struct clk *clk;
-
-       evt = kzalloc(sizeof(*evt), GFP_KERNEL);
-       if (!evt)
-               return -ENOMEM;
-
-       evt->dev.name = "mtk_tick";
-       evt->dev.rating = 300;
-       evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
-       evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
-       evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
-       evt->dev.tick_resume = mtk_clkevt_shutdown;
-       evt->dev.set_next_event = mtk_clkevt_next_event;
-       evt->dev.cpumask = cpu_possible_mask;
-
-       evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
-       if (IS_ERR(evt->gpt_base)) {
-               pr_err("Can't get resource\n");
-               goto err_kzalloc;
-       }
-
-       evt->dev.irq = irq_of_parse_and_map(node, 0);
-       if (evt->dev.irq <= 0) {
-               pr_err("Can't parse IRQ\n");
-               goto err_mem;
-       }
-
-       clk = of_clk_get(node, 0);
-       if (IS_ERR(clk)) {
-               pr_err("Can't get timer clock\n");
-               goto err_irq;
-       }
-
-       if (clk_prepare_enable(clk)) {
-               pr_err("Can't prepare clock\n");
-               goto err_clk_put;
-       }
-       rate = clk_get_rate(clk);
-
-       if (request_irq(evt->dev.irq, mtk_timer_interrupt,
-                       IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
-               pr_err("failed to setup irq %d\n", evt->dev.irq);
-               goto err_clk_disable;
-       }
-
-       evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
-       /* Configure clock source */
-       mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
-       clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
-                       node->name, rate, 300, 32, clocksource_mmio_readl_up);
-       gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
-       sched_clock_register(mtk_read_sched_clock, 32, rate);
-
-       /* Configure clock event */
-       mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-       clockevents_config_and_register(&evt->dev, rate, 0x3,
-                                       0xffffffff);
-
-       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
-       return 0;
-
-err_clk_disable:
-       clk_disable_unprepare(clk);
-err_clk_put:
-       clk_put(clk);
-err_irq:
-       irq_dispose_mapping(evt->dev.irq);
-err_mem:
-       iounmap(evt->gpt_base);
-       of_address_to_resource(node, 0, &res);
-       release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
-       kfree(evt);
-
-       return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
index c337a8100a7b991988fa63cd31f0de96194803a2..aa624885e0e239df05aa08be95577eb064c392dd 100644 (file)
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
                return ret;
        }
 
-       tegra_clockevent.cpumask = cpu_all_mask;
+       tegra_clockevent.cpumask = cpu_possible_mask;
        tegra_clockevent.irq = tegra_timer_irq.irq;
        clockevents_config_and_register(&tegra_clockevent, 1000000,
                                        0x1, 0x1fffffff);
@@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
        else
                clk_prepare_enable(clk);
 
-       return register_persistent_clock(NULL, tegra_read_persistent_clock64);
+       return register_persistent_clock(tegra_read_persistent_clock64);
 }
 TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
index 5e23d7b4a72200f7117283cce4cb47f003022e27..b4bd2f5b801d07f9209c27c5947e0cdd9ed1bd9e 100644 (file)
@@ -185,7 +185,7 @@ static struct timer_of to = {
                .set_state_oneshot = atcpit100_clkevt_set_oneshot,
                .tick_resume = atcpit100_clkevt_shutdown,
                .set_next_event = atcpit100_clkevt_next_event,
-               .cpumask = cpu_all_mask,
+               .cpumask = cpu_possible_mask,
        },
 
        .of_irq = {
index 0eee03250cfc87ef69b4e7744e7274b477071fcb..f5b2eda30bf336f79fd0b9553b7b6f1ef44f8dd4 100644 (file)
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
        event_dev->set_state_shutdown = keystone_shutdown;
        event_dev->set_state_periodic = keystone_set_periodic;
        event_dev->set_state_oneshot = keystone_shutdown;
-       event_dev->cpumask = cpu_all_mask;
+       event_dev->cpumask = cpu_possible_mask;
        event_dev->owner = THIS_MODULE;
        event_dev->name = TIMER_NAME;
        event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644 (file)
index 0000000..eb10321
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT           (1)
+#define TIMER_CLK_SRC           (2)
+
+#define TIMER_SYNC_TICKS        (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG          0x00
+#define GPT_IRQ_ENABLE(val)     BIT((val) - 1)
+#define GPT_IRQ_ACK_REG                0x08
+#define GPT_IRQ_ACK(val)        BIT((val) - 1)
+
+#define GPT_CTRL_REG(val)       (0x10 * (val))
+#define GPT_CTRL_OP(val)        (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT     (0)
+#define GPT_CTRL_OP_REPEAT      (1)
+#define GPT_CTRL_OP_FREERUN     (3)
+#define GPT_CTRL_CLEAR          (2)
+#define GPT_CTRL_ENABLE         (1)
+#define GPT_CTRL_DISABLE        (0)
+
+#define GPT_CLK_REG(val)        (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val)        (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M      (0)
+#define GPT_CLK_SRC_RTC32K      (1)
+#define GPT_CLK_DIV1            (0x0)
+#define GPT_CLK_DIV2            (0x1)
+
+#define GPT_CNT_REG(val)        (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val)        (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE               (0x40)
+
+#define SYST_CON                (SYST_BASE + 0x0)
+#define SYST_VAL                (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to)        (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to)        (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Shall be set to:
+ *   - Start the timer countdown.
+ *   - Allow the timeout ticks to be updated.
+ *   - Allow the interrupt functions to be changed.
+ *
+ * SYST_CON_IRQ_EN: Set to allow interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear interrupt.
+ */
+#define SYST_CON_EN              BIT(0)
+#define SYST_CON_IRQ_EN          BIT(1)
+#define SYST_CON_IRQ_CLR         BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+       /* Clear and disable interrupt */
+       writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+       struct clock_event_device *clkevt = dev_id;
+       struct timer_of *to = to_timer_of(clkevt);
+
+       mtk_syst_ack_irq(to);
+       clkevt->event_handler(clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+                                     struct clock_event_device *clkevt)
+{
+       struct timer_of *to = to_timer_of(clkevt);
+
+       /* Enable clock to allow timeout tick update later */
+       writel(SYST_CON_EN, SYST_CON_REG(to));
+
+       /*
+        * Write new timeout ticks. Timer shall start countdown
+        * after timeout ticks are updated.
+        */
+       writel(ticks, SYST_VAL_REG(to));
+
+       /* Enable interrupt */
+       writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+       return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+       /* Disable timer */
+       writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+       return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+       return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+       return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+       return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+       u32 val;
+
+       val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+       writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+              GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+                                     unsigned long delay, u8 timer)
+{
+       writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+                                     bool periodic, u8 timer)
+{
+       u32 val;
+
+       /* Acknowledge interrupt */
+       writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+       val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+       /* Clear 2 bit timer operation mode field */
+       val &= ~GPT_CTRL_OP(0x3);
+
+       if (periodic)
+               val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+       else
+               val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+       writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+       mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+                                    struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+       struct timer_of *to = to_timer_of(clkevt);
+
+       /* Acknowledge timer0 irq */
+       writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+       clkevt->event_handler(clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+       writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+
+       writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+              timer_of_base(to) + GPT_CLK_REG(timer));
+
+       writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+       writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+       u32 val;
+
+       /* Disable all interrupts */
+       writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+       /* Acknowledge all spurious pending interrupts */
+       writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+       val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+       writel(val | GPT_IRQ_ENABLE(timer),
+              timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+       .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+       .clkevt = {
+               .name = "mtk-clkevt",
+               .rating = 300,
+               .cpumask = cpu_possible_mask,
+       },
+
+       .of_irq = {
+               .flags = IRQF_TIMER | IRQF_IRQPOLL,
+       },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+       int ret;
+
+       to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+       to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+       to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+       to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+       to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+       to.of_irq.handler = mtk_syst_handler;
+
+       ret = timer_of_init(node, &to);
+       if (ret)
+               goto err;
+
+       clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
+       return 0;
+err:
+       timer_of_cleanup(&to);
+       return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+       int ret;
+
+       to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+       to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+       to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+       to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+       to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+       to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+       to.of_irq.handler = mtk_gpt_interrupt;
+
+       ret = timer_of_init(node, &to);
+       if (ret)
+               goto err;
+
+       /* Configure clock source */
+       mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+       clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+                             node->name, timer_of_rate(&to), 300, 32,
+                             clocksource_mmio_readl_up);
+       gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+       sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+       /* Configure clock event */
+       mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+       clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
+       mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+       return 0;
+err:
+       timer_of_cleanup(&to);
+       return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
index ef9ebeafb3edf0d8b41d78413cbfbc0db3b6a322..430cb99d8d79b4894c1c219821d6fbdb37bf6e24 100644 (file)
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
        return 0;
 }
 
+static struct timer_of suspend_to = {
+       .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+       return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+                                  TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+       sprd_timer_update_counter(timer_of_base(&suspend_to),
+                                 TIMER_VALUE_LO_MASK);
+       sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+       return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+       sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+       .name   = "sprd_suspend_timer",
+       .rating = 200,
+       .read   = sprd_suspend_timer_read,
+       .enable = sprd_suspend_timer_enable,
+       .disable = sprd_suspend_timer_disable,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+       int ret;
+
+       ret = timer_of_init(np, &suspend_to);
+       if (ret)
+               return ret;
+
+       clocksource_register_hz(&suspend_clocksource,
+                               timer_of_rate(&suspend_to));
+
+       return 0;
+}
+
 TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+                sprd_suspend_timer_init);
index e5cdc3af684cbbe2370406c79af63df143776cd3..2717f88c79040a1ec26e2dd0d01f5d2dd25d9734 100644 (file)
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
 
        to->private_data = kzalloc(sizeof(struct stm32_timer_private),
                                   GFP_KERNEL);
-       if (!to->private_data)
+       if (!to->private_data) {
+               ret = -ENOMEM;
                goto deinit;
+       }
 
        rstc = of_reset_control_get(node, NULL);
        if (!IS_ERR(rstc)) {
index 880a861ab3c82dd1709b4accc9d9200593ea9ffa..29e2e1a78a43372ee96e64bb9b93d6b21b5288f7 100644 (file)
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
                .rating         = 250,
                .read           = ti_32k_read_cycles,
                .mask           = CLOCKSOURCE_MASK(32),
-               .flags          = CLOCK_SOURCE_IS_CONTINUOUS |
-                               CLOCK_SOURCE_SUSPEND_NONSTOP,
+               .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        },
 };
 
index a6a0338eea77f73fbae0b549e3ec77265c00b2f4..f74689334f7cb729f4f9f1840401f97fabda5819 100644 (file)
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
                timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
                timer->clkevt.tick_resume       = zevio_timer_set_oneshot;
                timer->clkevt.rating            = 200;
-               timer->clkevt.cpumask           = cpu_all_mask;
+               timer->clkevt.cpumask           = cpu_possible_mask;
                timer->clkevt.features          = CLOCK_EVT_FEAT_ONESHOT;
                timer->clkevt.irq               = irqnr;
 
index 1de5ec8d5ea3e9995e3ffd413f728df078c25f8f..d4ed0022b0dd2e6250748f332f5ad6f42417f864 100644 (file)
@@ -294,6 +294,7 @@ struct pstate_funcs {
 static struct pstate_funcs pstate_funcs __read_mostly;
 
 static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;
 
@@ -310,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
 
 #ifdef CONFIG_ACPI
 
-static bool intel_pstate_get_ppc_enable_status(void)
+static bool intel_pstate_acpi_pm_profile_server(void)
 {
        if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
            acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
                return true;
 
+       return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+       if (intel_pstate_acpi_pm_profile_server())
+               return true;
+
        return acpi_ppc;
 }
 
@@ -458,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 {
 }
+
+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+       return false;
+}
 #endif
 
 static inline void update_turbo_state(void)
@@ -1413,7 +1427,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();
        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-       cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+       if (hwp_active && !hwp_mode_bdw) {
+               unsigned int phy_max, current_max;
+
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
 
        if (pstate_funcs.get_aperf_mperf_shift)
                cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -1832,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                intel_pstate_hwp_enable(cpu);
 
                id = x86_match_cpu(intel_pstate_hwp_boost_ids);
-               if (id)
+               if (id && intel_pstate_acpi_pm_profile_server())
                        hwp_boost = true;
        }
 
@@ -2385,6 +2407,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
        return true;
 }
 
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+       acpi_status status;
+       acpi_handle handle;
+
+       status = acpi_get_handle(NULL, "\\_SB", &handle);
+       if (ACPI_FAILURE(status))
+               return true;
+
+       return !acpi_has_method(handle, "PCCH");
+}
+
 static bool __init intel_pstate_has_acpi_ppc(void)
 {
        int i;
@@ -2444,7 +2478,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 
        switch (plat_info[idx].data) {
        case PSS:
-               return intel_pstate_no_acpi_pss();
+               if (!intel_pstate_no_acpi_pss())
+                       return false;
+
+               return intel_pstate_no_acpi_pcch();
        case PPC:
                return intel_pstate_has_acpi_ppc() && !force_load;
        }
@@ -2467,28 +2504,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */
 
+#define INTEL_PSTATE_HWP_BROADWELL     0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-       { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+       ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(X86_MODEL_ANY, 0),
        {}
 };
 
 static int __init intel_pstate_init(void)
 {
+       const struct x86_cpu_id *id;
        int rc;
 
        if (no_load)
                return -ENODEV;
 
-       if (x86_match_cpu(hwp_support_ids)) {
+       id = x86_match_cpu(hwp_support_ids);
+       if (id) {
                copy_cpu_funcs(&core_funcs);
                if (!no_hwp) {
                        hwp_active++;
+                       hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        goto hwp_cpu_matched;
                }
        } else {
-               const struct x86_cpu_id *id;
-
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id)
                        return -ENODEV;
index 3f0ce2ae35ee432637c28e7dddd2851a20e29f16..0c56c97596725edf323a2abf471bbaf69582b903 100644 (file)
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
 {
        int ret;
 
+       /* Skip initialization if another cpufreq driver is already registered. */
+       if (cpufreq_get_current_driver())
+               return 0;
+
        if (acpi_disabled)
                return 0;
 
index d049fe4b80c48e00d169f3835bb7b70b8022a879..efc9a7ae485707e32c2dade8c64c58fb798a8c8d 100644 (file)
@@ -42,6 +42,8 @@ enum _msm8996_version {
        NUM_OF_MSM8996_VERSIONS,
 };
 
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
 static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
        struct opp_table *opp_tables[NR_CPUS] = {0};
-       struct platform_device *cpufreq_dt_pdev;
        enum _msm8996_version msm8996_version;
        struct nvmem_cell *speedbin_nvmem;
        struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        int ret;
 
        cpu_dev = get_cpu_device(0);
-       if (NULL == cpu_dev)
-               ret = -ENODEV;
+       if (!cpu_dev)
+               return -ENODEV;
 
        msm8996_version = qcom_cpufreq_kryo_get_msm_id();
        if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        }
 
        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
-       if (IS_ERR(np))
-               return PTR_ERR(np);
+       if (!np)
+               return -ENOENT;
 
        ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
        if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
        nvmem_cell_put(speedbin_nvmem);
+       if (IS_ERR(speedbin))
+               return PTR_ERR(speedbin);
 
        switch (msm8996_version) {
        case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
                BUG();
                break;
        }
+       kfree(speedbin);
 
        for_each_possible_cpu(cpu) {
                cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ free_opp:
        return ret;
 }
 
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+       platform_device_unregister(cpufreq_dt_pdev);
+       return 0;
+}
+
 static struct platform_driver qcom_cpufreq_kryo_driver = {
        .probe = qcom_cpufreq_kryo_probe,
+       .remove = qcom_cpufreq_kryo_remove,
        .driver = {
                .name = "qcom-cpufreq-kryo",
        },
@@ -172,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
 static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
+       {}
 };
 
 /*
@@ -198,8 +210,9 @@ static int __init qcom_cpufreq_kryo_init(void)
        if (unlikely(ret < 0))
                return ret;
 
-       ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
-               "qcom-cpufreq-kryo", -1, NULL, 0));
+       kryo_cpufreq_pdev = platform_device_register_simple(
+               "qcom-cpufreq-kryo", -1, NULL, 0);
+       ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
        if (0 == ret)
                return 0;
 
@@ -208,5 +221,12 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
+static void __init qcom_cpufreq_kryo_exit(void)
+{
+       platform_device_unregister(kryo_cpufreq_pdev);
+       platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
 MODULE_LICENSE("GPL v2");
index 00c7aab8e7d0f5861e778dc4d26affe5c1234603..afebbd87c4aa1d22ca179f558552cb2f410fcc0a 100644 (file)
@@ -1548,15 +1548,14 @@ skip_copy:
                        tp->urg_data = 0;
 
                if ((avail + offset) >= skb->len) {
-                       if (likely(skb))
-                               chtls_free_skb(sk, skb);
-                       buffers_freed++;
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
                        } else {
                                tp->copied_seq += hws->rcvpld;
                        }
+                       chtls_free_skb(sk, skb);
+                       buffers_freed++;
                        hws->copied_seq = 0;
                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
index 1c6cbda56afe9964c65f58e0cf43024b26b08ba3..09d823d36d3a4ec71913e235a82e4fd101132728 100644 (file)
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                return;
        }
 
+       count -= initial;
+
        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                              : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
-                     : "d"(control_word), "b"(key), "c"(count - initial));
+                     : "d"(control_word), "b"(key), "c"(count));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
        if (count < cbc_fetch_blocks)
                return cbc_crypt(input, output, key, iv, control_word, count);
 
+       count -= initial;
+
        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                              : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
-                     : "d" (control_word), "b" (key), "c" (count-initial));
+                     : "d" (control_word), "b" (key), "c" (count));
        return iv;
 }
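
Both padlock-aes hunks above hoist the count adjustment ahead of the inline assembly, so the main rep xcrypt pass is handed a plain, pre-computed count rather than an expression evaluated around the first asm statement. The intended split, as a plain-C sketch under stated assumptions: xcrypt_hw() is a hypothetical stand-in for the rep xcrypt inline assembly, and AES blocks are 16 bytes.

	/*
	 * A few "initial" blocks are processed first (the Nano prefetch
	 * workaround), then the remainder in one pass; the remainder
	 * count is adjusted exactly once, up front.
	 */
	static void xcrypt_split(const u8 *in, u8 *out, size_t count,
				 size_t initial)
	{
		count -= initial;

		if (initial)
			xcrypt_hw(in, out, initial);

		xcrypt_hw(in + initial * 16, out + initial * 16, count);
	}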
 
index de2f8297a210bb4ea3998815353e72b7c11598da..108c37fca78279c06e896afcc411220996872760 100644 (file)
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
-               dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
-               dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
-               dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        if (!vma_is_dax(vma)) {
-               dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }
index 903d9c473749c24d636f573aba798a9680df2909..45276abf03aa2bd52aa9af56b8cbd45a4b1e5135 100644 (file)
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
        struct dax_device *dax_dev;
        bool dax_enabled = false;
+       struct request_queue *q;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                return false;
        }
 
+       q = bdev_get_queue(bdev);
+       if (!q || !blk_queue_dax(q)) {
+               pr_debug("%s: error: request queue doesn't support dax\n",
+                               bdevname(bdev, buf));
+               return false;
+       }
+
        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
        if (err) {
                pr_debug("%s: error: unaligned partition for dax\n",
index fa31cccbe04faf5fa6a8adb07abf6b48a4a6cdd2..6bfa217ed6d0de81d1ef51b37accdba9ae4cd04b 100644 (file)
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        return dma_get_slave_channel(&(d->chans[request].vc.chan));
index defcdde4d358b19cc5430de95fb5e9f16ec538ca..de0957fe966821beb79ee1b75470a8834509ef44 100644 (file)
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
                         1 : PL330_MAX_BURST);
 
index 9b5ca8691f27dcf6561fbd98051b697ab2da6011..a4a931ddf6f695fa21a25a359a94ee0c57f92beb 100644 (file)
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       if (__dma_omap15xx(od->plat->dma_attr))
+               od->ddev.residue_granularity =
+                               DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+       else
+               od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
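The residue granularity advertised here is what dmaengine clients observe through dma_get_slave_caps(); a hedged sketch of how a client might gate position-tracking logic on it (demo_residue_is_precise() is hypothetical):

#include <linux/dmaengine.h>

/* Sketch: check how fine-grained tx_status() residue reporting is
 * before relying on it for e.g. audio pointer queries.
 */
static bool demo_residue_is_precise(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* DESCRIPTOR granularity only updates per completed descriptor */
	return caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_BURST;
}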
index 951b6c79f166a7d2b4ec14e12096559df9aed684..624a11cb07e23b775d097d930997cc6ebd171b06 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,         0444, DMI_PRODUCT_SKU);
 DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
        ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
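Once registered, the attribute appears alongside the other DMI strings under /sys/class/dmi/id/. A small userspace sketch that reads it (assuming the file exists and is readable on the running system):

#include <stdio.h>

int main(void)
{
	char sku[128];
	FILE *f = fopen("/sys/class/dmi/id/product_sku", "r");

	if (!f)
		return 1;
	if (fgets(sku, sizeof(sku), f))
		printf("SKU: %s", sku);
	fclose(f);
	return 0;
}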
index 54e66adef2525179e49ecfe9fc04e253ecc18e51..f2483548cde92d692f748d6a9c7da0cbf98274a3 100644 (file)
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
                dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
index 781a4a33755739438acb6430576f6f2221601f88..d8e159feb573f1a6cdc171bc90b5948b72cc45bd 100644 (file)
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
 config EFI_ARMSTUB
        bool
 
+config EFI_ARMSTUB_DTB_LOADER
+       bool "Enable the DTB loader"
+       depends on EFI_ARMSTUB
+       help
+         Select this config option to add support for the dtb= command
+         line parameter, allowing a device tree blob to be loaded into
+         memory from the EFI System Partition by the stub.
+
+         The device tree is typically provided by the platform or by
+         the bootloader, so this option is mainly intended for
+         development purposes.
+
 config EFI_BOOTLOADER_CONTROL
        tristate "EFI Bootloader Control"
        depends on EFI_VARS
index 3bf0dca378a647f34e5fbc9092df224816e8f3a3..a7902fccdcfa4c5f39e42d6a4561d46a5a2a017b 100644 (file)
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
 {
        static atomic64_t seq;
 
-       if (!atomic64_read(&seq))
-               atomic64_set(&seq, ((u64)get_seconds()) << 32);
+       if (!atomic64_read(&seq)) {
+               time64_t time = ktime_get_real_seconds();
+
+               /*
+                * This code is unlikely to still be needed in year 2106,
+                * but just in case, let's use a few more bits for timestamps
+                * after y2038 to be sure they keep increasing monotonically
+                * for the next few hundred years...
+                */
+               if (time < 0x80000000)
+                       atomic64_set(&seq, time << 32);
+               else
+                       atomic64_set(&seq, 0x8000000000000000ull |
+                                          time << 24);
+       }
 
        return atomic64_inc_return(&seq);
 }
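The seed thus packs a timestamp into the high bits and leaves the low bits as a counter; record ids keep increasing monotonically across the y2038 boundary because the post-2038 format sets bit 63. A standalone sketch of the seeding logic (illustrative values, not kernel code):

#include <stdint.h>

static uint64_t demo_seed(int64_t seconds)
{
	if (seconds < 0x80000000LL)
		return (uint64_t)seconds << 32;	/* stamp in bits 63..32 */
	return 0x8000000000000000ULL |		/* format flag in bit 63 */
	       ((uint64_t)seconds << 24);	/* stamp in bits 62..24 */
}

/* demo_seed(0x60000000) == 0x6000000000000000 (early 2021), and
 * cper_next_record_id() then returns 0x6000000000000001, ...
 */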
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
                else
                        goto err_section_too_small;
 #if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-       } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+       } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
 
                printk("%ssection_type: ARM processor error\n", newpfx);
index 232f4915223b519b3ee6580cb08354ac9b0a7016..2a29dd9c986d4e2df7663aa1305df8adbaa59739 100644 (file)
@@ -82,8 +82,11 @@ struct mm_struct efi_mm = {
        .mmap_sem               = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
        .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
+       .cpu_bitmap             = { [BITS_TO_LONGS(NR_CPUS)] = 0},
 };
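The new cpu_bitmap line relies on a GCC extension: giving an initializer for element N of a trailing flexible array member forces the statically allocated object to be emitted with storage for N+1 elements. A minimal sketch of the trick with demo types (not kernel code):

struct demo {
	int nr;
	unsigned long bitmap[];	/* flexible array member */
};

/* GCC extension: '[3] = 0' makes the static object large enough for
 * four trailing elements, even though sizeof(struct demo) still
 * excludes the flexible array.
 */
static struct demo d = { .nr = 0, .bitmap = { [3] = 0 } };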
 
+struct workqueue_struct *efi_rts_wq;
+
 static bool disable_runtime;
 static int __init setup_noefi(char *arg)
 {
@@ -337,6 +340,18 @@ static int __init efisubsys_init(void)
        if (!efi_enabled(EFI_BOOT))
                return 0;
 
+       /*
+        * Since we process only one efi_runtime_service() at a time, an
+        * ordered workqueue (which creates only one execution context)
+        * should suffice for all our needs.
+        */
+       efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+       if (!efi_rts_wq) {
+               pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return 0;
+       }
+
        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
@@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init);
  * and if so, populate the supplied memory descriptor with the appropriate
  * data.
  */
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
        efi_memory_desc_t *md;
 
@@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
                u64 size;
                u64 end;
 
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_DATA &&
-                   md->type != EFI_RUNTIME_SERVICES_DATA) {
-                       continue;
-               }
-
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
index 1ab80e06e7c510b0ad730c2de0cbe30b4f826f48..5d06bd247d0731a652424d504293f028d291c1b8 100644 (file)
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
                return;
 
        rc = efi_mem_desc_lookup(efi.esrt, &md);
-       if (rc < 0) {
+       if (rc < 0 ||
+           (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+            md.type != EFI_BOOT_SERVICES_DATA &&
+            md.type != EFI_RUNTIME_SERVICES_DATA)) {
                pr_warn("ESRT header is not in the memory map.\n");
                return;
        }
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
 
        end = esrt_data + size;
        pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
-       efi_mem_reserve(esrt_data, esrt_data_size);
+       if (md.type == EFI_BOOT_SERVICES_DATA)
+               efi_mem_reserve(esrt_data, esrt_data_size);
 
        pr_debug("esrt-init: loaded.\n");
 }
index 01a9d78ee4154702e89ab8534727dc1ab66998d4..6920033de6d411689719e64226112a19a8c8021d 100644 (file)
 
 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
-                            void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_t *image = __image;
-       efi_file_handle_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-
-       status = sys_table_arg->boottime->handle_protocol(handle,
-                                &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       status = io->open_volume(io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
 void efi_char16_printk(efi_system_table_t *sys_table_arg,
                              efi_char16_t *str)
 {
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
         * 'dtb=' unless UEFI Secure Boot is disabled.  We assume that secure
         * boot is enabled if we can't determine its state.
         */
-       if (secure_boot != efi_secureboot_mode_disabled &&
-           strstr(cmdline_ptr, "dtb=")) {
-               pr_efi(sys_table, "Ignoring DTB from command line.\n");
+       if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+            secure_boot != efi_secureboot_mode_disabled) {
+               if (strstr(cmdline_ptr, "dtb="))
+                       pr_efi(sys_table, "Ignoring DTB from command line.\n");
        } else {
                status = handle_cmdline_files(sys_table, image, cmdline_ptr,
                                              "dtb=",
index 50a9cab5a8340e542e2f8d12172bd4d500934d91..e94975f4655bdd50d1db4b4a8be728d266d7d02b 100644 (file)
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
        return efi_call_proto(efi_file_handle, close, handle);
 }
 
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+                                   efi_loaded_image_t *image,
+                                   efi_file_handle_t **__fh)
+{
+       efi_file_io_interface_t *io;
+       efi_file_handle_t *fh;
+       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+       efi_status_t status;
+       void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+                                                            device_handle,
+                                                            image);
+
+       status = efi_call_early(handle_protocol, handle,
+                               &fs_proto, (void **)&io);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+               return status;
+       }
+
+       status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table_arg, "Failed to open volume\n");
+       else
+               *__fh = fh;
+
+       return status;
+}
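Unlike the ARM-only version it replaces, this generic helper also has to work for x86 mixed mode, which is why the protocol is reached through efi_table_attr()/efi_call_proto()/efi_call_early() instead of direct member access. Conceptually those macros pick between two firmware struct layouts, roughly like the hypothetical sketch below (demo_proto_32_t/demo_proto_64_t are placeholders, not the real types or macros, which are arch-specific):

/* Illustration only: the same source line reads the member from
 * whichever layout the firmware actually uses.
 */
#define demo_table_attr(inst, member)					\
	(efi_is_64bit()							\
	 ? ((demo_proto_64_t *)(unsigned long)(inst))->member		\
	 : ((demo_proto_32_t *)(unsigned long)(inst))->member)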
+
 /*
  * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
                /* Only open the volume once. */
                if (!i) {
-                       status = efi_open_volume(sys_table_arg, image,
-                                                (void **)&fh);
+                       status = efi_open_volume(sys_table_arg, image, &fh);
                        if (status != EFI_SUCCESS)
                                goto free_files;
                }
index f59564b72ddcdc0fae2c438f71c0c3d414f4a627..32799cf039ef1562afc8f124ab7eb9d64342083c 100644 (file)
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
 
 void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
-                            void **__fh);
-
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
index caa37a6dd9d4eca506e3a0c2fed3c636fbd05d2b..a90b0b8fc69a18abb62d10c3f046a88a7300fd5b 100644 (file)
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
        efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
        efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
        efi_status_t status;
-       efi_physical_addr_t log_location, log_last_entry;
+       efi_physical_addr_t log_location = 0, log_last_entry = 0;
        struct linux_efi_tpm_eventlog *log_tbl = NULL;
        unsigned long first_entry_addr, last_entry_addr;
        size_t log_size, last_entry_size;
index ae54870b27886b016b0924fb85004b2fc4323700..aa66cbf23512af3c6e9195cd5b3471533a8f8e42 100644 (file)
@@ -1,6 +1,15 @@
 /*
  * runtime-wrappers.c - Runtime Services function call wrappers
  *
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests execution of an
+ * efi_runtime_service(), the request is queued as work on efi_rts_wq.
+ * 2. The caller thread waits for the work to finish because it depends
+ * on the return status and side effects of the efi_runtime_service()
+ * call, e.g. get_variable() and get_next_variable().
+ *
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
 #include <linux/mutex.h>
 #include <linux/semaphore.h>
 #include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
 #include <asm/efi.h>
 
 /*
 #define __efi_call_virt(f, args...) \
        __efi_call_virt_pointer(efi.systab->runtime, f, args)
 
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+       GET_TIME,
+       SET_TIME,
+       GET_WAKEUP_TIME,
+       SET_WAKEUP_TIME,
+       GET_VARIABLE,
+       GET_NEXT_VARIABLE,
+       SET_VARIABLE,
+       QUERY_VARIABLE_INFO,
+       GET_NEXT_HIGH_MONO_COUNT,
+       UPDATE_CAPSULE,
+       QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work:   Details of EFI Runtime Service work
+ * @arg<1-5>:          EFI Runtime Service function arguments
+ * @status:            Status of executing EFI Runtime Service
+ * @work:              Work item queued on efi_rts_wq
+ * @efi_rts_id:                EFI Runtime Service function identifier
+ * @efi_rts_comp:      Completion used to signal that the work is done
+ */
+struct efi_runtime_work {
+       void *arg1;
+       void *arg2;
+       void *arg3;
+       void *arg4;
+       void *arg5;
+       efi_status_t status;
+       struct work_struct work;
+       enum efi_rts_ids efi_rts_id;
+       struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work:     Queue efi_runtime_service() and wait until it's done
+ * @rts:               efi_runtime_service() function identifier
+ * @rts_arg<1-5>:      efi_runtime_service() function arguments
+ *
+ * Accesses to the EFI runtime services are serialized by a binary
+ * semaphore (efi_runtime_lock), and the caller waits until the work
+ * is finished; hence _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)                \
+({                                                                     \
+       struct efi_runtime_work efi_rts_work;                           \
+       efi_rts_work.status = EFI_ABORTED;                              \
+                                                                       \
+       init_completion(&efi_rts_work.efi_rts_comp);                    \
+       INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
+       efi_rts_work.arg1 = _arg1;                                      \
+       efi_rts_work.arg2 = _arg2;                                      \
+       efi_rts_work.arg3 = _arg3;                                      \
+       efi_rts_work.arg4 = _arg4;                                      \
+       efi_rts_work.arg5 = _arg5;                                      \
+       efi_rts_work.efi_rts_id = _rts;                                 \
+                                                                       \
+       /*                                                              \
+        * queue_work() returns 0 if work was already on queue,         \
+        * queue_work() returns 0 if the work was already on the       \
+        * queue; _ideally_ this should never happen.                  \
+       if (queue_work(efi_rts_wq, &efi_rts_work.work))                 \
+               wait_for_completion(&efi_rts_work.efi_rts_comp);        \
+       else                                                            \
+               pr_err("Failed to queue work to efi_rts_wq.\n");        \
+                                                                       \
+       efi_rts_work.status;                                            \
+})
+
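The macro above is an instance of a common kernel pattern: an on-stack work item paired with a completion lets a sleeping caller borrow a workqueue's execution context and collect a result. A stripped-down sketch of the same pattern with demo names (not the EFI code):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct demo_work {
	struct work_struct work;
	struct completion done;
	int result;
};

static void demo_fn(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work);

	dw->result = 42;		/* the real call would go here */
	complete(&dw->done);
}

static int demo_run(struct workqueue_struct *wq)
{
	struct demo_work dw;		/* lives on the caller's stack */

	init_completion(&dw.done);
	INIT_WORK_ONSTACK(&dw.work, demo_fn);
	if (!queue_work(wq, &dw.work))
		return -EBUSY;
	wait_for_completion(&dw.done);	/* work cannot outlive the stack */
	destroy_work_on_stack(&dw.work);
	return dw.result;
}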
 void efi_call_virt_check_flags(unsigned long flags, const char *call)
 {
        unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
  */
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Marshalling semantics used by efi_call_rts() to unpack
+ * efi_runtime_work:
+ * 1. If an argument was passed as a pointer, cast it from void pointer
+ * back to its original pointer type.
+ * 2. If an argument was passed by value, cast the void pointer to a
+ * pointer to the original type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+       struct efi_runtime_work *efi_rts_work;
+       void *arg1, *arg2, *arg3, *arg4, *arg5;
+       efi_status_t status = EFI_NOT_FOUND;
+
+       efi_rts_work = container_of(work, struct efi_runtime_work, work);
+       arg1 = efi_rts_work->arg1;
+       arg2 = efi_rts_work->arg2;
+       arg3 = efi_rts_work->arg3;
+       arg4 = efi_rts_work->arg4;
+       arg5 = efi_rts_work->arg5;
+
+       switch (efi_rts_work->efi_rts_id) {
+       case GET_TIME:
+               status = efi_call_virt(get_time, (efi_time_t *)arg1,
+                                      (efi_time_cap_t *)arg2);
+               break;
+       case SET_TIME:
+               status = efi_call_virt(set_time, (efi_time_t *)arg1);
+               break;
+       case GET_WAKEUP_TIME:
+               status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+                                      (efi_bool_t *)arg2, (efi_time_t *)arg3);
+               break;
+       case SET_WAKEUP_TIME:
+               status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+                                      (efi_time_t *)arg2);
+               break;
+       case GET_VARIABLE:
+               status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, (u32 *)arg3,
+                                      (unsigned long *)arg4, (void *)arg5);
+               break;
+       case GET_NEXT_VARIABLE:
+               status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+                                      (efi_char16_t *)arg2,
+                                      (efi_guid_t *)arg3);
+               break;
+       case SET_VARIABLE:
+               status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, *(u32 *)arg3,
+                                      *(unsigned long *)arg4, (void *)arg5);
+               break;
+       case QUERY_VARIABLE_INFO:
+               status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+                                      (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+               break;
+       case GET_NEXT_HIGH_MONO_COUNT:
+               status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+               break;
+       case UPDATE_CAPSULE:
+               status = efi_call_virt(update_capsule,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2,
+                                      *(unsigned long *)arg3);
+               break;
+       case QUERY_CAPSULE_CAPS:
+               status = efi_call_virt(query_capsule_caps,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2, (u64 *)arg3,
+                                      (int *)arg4);
+               break;
+       default:
+               /*
+                * Ideally, we should never reach here because a caller of this
+                * function should have put the right efi_runtime_service()
+                * function identifier into efi_rts_work->efi_rts_id
+                */
+               pr_err("Requested execution of an invalid EFI Runtime Service.\n");
+       }
+       efi_rts_work->status = status;
+       complete(&efi_rts_work->efi_rts_comp);
+}
+
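Following those semantics, the wrappers below hand pointer arguments through unchanged and take the address of value arguments (virt_efi_set_wakeup_time() passing &enabled, for example). A minimal sketch of the pack/unpack round trip (demo helpers, not kernel code):

/* Caller side: a value argument travels through a void * slot by
 * address ...
 */
static void demo_pack(void **slot, unsigned int *val)
{
	*slot = val;
}

/* ... worker side: recovered by cast-and-dereference. */
static unsigned int demo_unpack(void *slot)
{
	return *(unsigned int *)slot;
}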
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_time, tm, tc);
+       status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_time, tm);
+       status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+       status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_wakeup_time, enabled, tm);
+       status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_variable, name_size, name, vendor);
+       status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_variable_info, attr, storage_space,
-                              remaining_space, max_variable_size);
+       status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+                               remaining_space, max_variable_size, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_high_mono_count, count);
+       status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(update_capsule, capsules, count, sg_list);
+       status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
-                              reset_type);
+       status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+                               max_size, reset_type, NULL);
        up(&efi_runtime_lock);
        return status;
 }
index dd4edd8f22ceebb67c4f6e1ab486c4ac9e9a1106..7fa793672a7a969239329ef1fccc5a2391c764c8 100644 (file)
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 
        mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
                              &altera_cvp_ops, conf);
-       if (!mgr)
-               return -ENOMEM;
+       if (!mgr) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
 
        pci_set_drvdata(pdev, mgr);
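The fix follows the usual kernel error-unwind idiom: once a resource is live, every later failure path must jump to a label that releases it instead of returning directly. A schematic sketch with hypothetical demo helpers (not the altera-cvp code):

static int demo_probe(void)
{
	void __iomem *base;
	int ret;

	base = demo_map();		/* resource acquired first */
	if (!base)
		return -ENOMEM;

	ret = demo_register(base);	/* any later failure ... */
	if (ret)
		goto err_unmap;		/* ... must release it */

	return 0;

err_unmap:
	demo_unmap(base);
	return ret;
}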
 
index d3cf9502e7e7f46abeb366c735f2e47d6808e64d..58faeb1cef63abaf8cadd77e9c3960f3ee7e17ee 100644 (file)
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
        fwspec.param_count = 2;
        fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-       fwspec.param[1] = IRQ_TYPE_NONE;
+       /*
+        * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+        * temporarily; ->irq_set_type() will override it later anyway.
+        */
+       fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
 
        return irq_create_fwspec_mapping(&fwspec);
 }
index e2232cbcec8bbe6276e8e9d02e8c0f9e36d34030..addd9fecc198360d03a4cfb420e1e2d6ecf90a67 100644 (file)
@@ -25,6 +25,7 @@
 
 struct acpi_gpio_event {
        struct list_head node;
+       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
        struct list_head events;
 };
 
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
        if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       if (!list_empty(&event->initial_sync_list))
+               list_del_init(&event->initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
        unsigned long irqflags;
-       int ret, pin, irq;
+       int ret, pin, irq, value;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
+       value = gpiod_get_value(desc);
+
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
                dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
+       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                enable_irq_wake(irq);
 
        list_add_tail(&event->node, &acpi_gpio->events);
+
+       /*
+        * Make sure we trigger the initial state of the IRQ when using RISING
+        * or FALLING.  Note we run the handlers at late_init; the AML code
+        * may refer to OperationRegions from other (builtin) drivers which
+        * may be probed after us.
+        */
+       if (handler == acpi_gpio_irq_handler &&
+           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+               acpi_gpio_add_to_initial_sync_list(event);
+
        return AE_OK;
 
 fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
+               acpi_gpio_del_from_initial_sync_list(event);
+
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 
        return con_id == NULL;
 }
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+       struct acpi_gpio_event *event, *ep;
+
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+                                initial_sync_list) {
+               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+               list_del_init(&event->initial_sync_list);
+       }
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
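The race this closes: an edge-triggered ACPI event GPIO may already sit at its active level before request_threaded_irq() runs, so the edge (and the corresponding event method) is never seen. An illustrative timeline (comment sketch, not code from this patch):

/*
 *   t0: firmware asserts the line (e.g. "device attached" at power-on)
 *   t1: driver probes, gpiod_get_value() reads the active level, and
 *       IRQF_TRIGGER_RISING is requested -> no edge will ever fire for
 *       the state that is already present
 *   t2: late_initcall_sync: acpi_gpio_initial_sync() evaluates the
 *       event method once, synthesizing the missed notification
 */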
index 28d968088131f433901d3b5b4bee4da8a9002250..53a14ee8ad6d364788a891d4f2aa083956df5f04 100644 (file)
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * Note that active low is the default.
         */
        if (IS_ENABLED(CONFIG_REGULATOR) &&
-           (of_device_is_compatible(np, "reg-fixed-voltage") ||
+           (of_device_is_compatible(np, "regulator-fixed") ||
+            of_device_is_compatible(np, "reg-fixed-voltage") ||
             of_device_is_compatible(np, "regulator-gpio"))) {
                /*
                 * The regulator GPIO handles are specified such that the
index a59c07590ceec2f314768066cbed28126ed68a6d..7dcbac8af9a7a8fb9c1cf46890d661be1ec34592 100644 (file)
@@ -190,6 +190,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
 /*
  * ACPI
  */
-struct amdgpu_atif_notification_cfg {
-       bool enabled;
-       int command_code;
-};
-
-struct amdgpu_atif_notifications {
-       bool display_switch;
-       bool expansion_mode_change;
-       bool thermal_state;
-       bool forced_power_state;
-       bool system_power_state;
-       bool display_conf_change;
-       bool px_gfx_switch;
-       bool brightness_change;
-       bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
-       bool system_params;
-       bool sbios_requests;
-       bool select_active_disp;
-       bool lid_state;
-       bool get_tv_standard;
-       bool set_tv_standard;
-       bool get_panel_expansion_mode;
-       bool set_panel_expansion_mode;
-       bool temperature_change;
-       bool graphics_device_types;
-};
-
-struct amdgpu_atif {
-       struct amdgpu_atif_notifications notifications;
-       struct amdgpu_atif_functions functions;
-       struct amdgpu_atif_notification_cfg notification_cfg;
-       struct amdgpu_encoder *encoder_for_bl;
-};
-
 struct amdgpu_atcs_functions {
        bool get_ext_state;
        bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
 #if defined(CONFIG_DEBUG_FS)
        struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
-       struct amdgpu_atif              atif;
+       struct amdgpu_atif              *atif;
        struct amdgpu_atcs              atcs;
        struct mutex                    srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
 /*
  * KMS
  */
index f4c474a9587510ed844ae7ec275b8b73c7695572..71efcf38f11beb2c628cee39ff4bae7fa50bedbd 100644 (file)
 #define ACP_I2S_COMP2_CAP_REG_OFFSET           0xa8
 #define ACP_I2S_COMP1_PLAY_REG_OFFSET          0x6c
 #define ACP_I2S_COMP2_PLAY_REG_OFFSET          0x68
+#define ACP_BT_PLAY_REGS_START                 0x14970
+#define ACP_BT_PLAY_REGS_END                   0x14a24
+#define ACP_BT_COMP1_REG_OFFSET                        0xac
+#define ACP_BT_COMP2_REG_OFFSET                        0xa8
 
 #define mmACP_PGFSM_RETAIN_REG                 0x51c9
 #define mmACP_PGFSM_CONFIG_REG                 0x51ca
@@ -77,7 +81,7 @@
 #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE     0x000000FF
 
 #define ACP_TIMEOUT_LOOP                       0x000000FF
-#define ACP_DEVS                               3
+#define ACP_DEVS                               4
 #define ACP_SRC_ID                             162
 
 enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
        if (adev->acp.acp_cell == NULL)
                return -ENOMEM;
 
-       adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+       adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
        if (adev->acp.acp_res == NULL) {
                kfree(adev->acp.acp_cell);
                return -ENOMEM;
        }
 
-       i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+       i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
        if (i2s_pdata == NULL) {
                kfree(adev->acp.acp_res);
                kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
        i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
        i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
 
+       i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+       switch (adev->asic_type) {
+       case CHIP_STONEY:
+               i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+               break;
+       default:
+               break;
+       }
+
+       i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+       i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+       i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+       i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
        adev->acp.acp_res[0].name = "acp2x_dma";
        adev->acp.acp_res[0].flags = IORESOURCE_MEM;
        adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
        adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
 
-       adev->acp.acp_res[3].name = "acp2x_dma_irq";
-       adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
-       adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
-       adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+       adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+       adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+       adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+       adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+       adev->acp.acp_res[4].name = "acp2x_dma_irq";
+       adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+       adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+       adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
 
        adev->acp.acp_cell[0].name = "acp_audio_dma";
-       adev->acp.acp_cell[0].num_resources = 4;
+       adev->acp.acp_cell[0].num_resources = 5;
        adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
        adev->acp.acp_cell[0].platform_data = &adev->asic_type;
        adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
        adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
 
+       adev->acp.acp_cell[3].name = "designware-i2s";
+       adev->acp.acp_cell[3].num_resources = 1;
+       adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+       adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+       adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
        r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
                                                                ACP_DEVS);
        if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
        val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
        val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
        cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
        return 0;
 }
 
index 8fa850a070e0fe8ea7a67823008ff7e543d8d1dc..0d8c3fc6eacefcfd71788e4442cc541a031a5416 100644 (file)
 #include "amd_acpi.h"
 #include "atom.h"
 
+struct amdgpu_atif_notification_cfg {
+       bool enabled;
+       int command_code;
+};
+
+struct amdgpu_atif_notifications {
+       bool display_switch;
+       bool expansion_mode_change;
+       bool thermal_state;
+       bool forced_power_state;
+       bool system_power_state;
+       bool display_conf_change;
+       bool px_gfx_switch;
+       bool brightness_change;
+       bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+       bool system_params;
+       bool sbios_requests;
+       bool select_active_disp;
+       bool lid_state;
+       bool get_tv_standard;
+       bool set_tv_standard;
+       bool get_panel_expansion_mode;
+       bool set_panel_expansion_mode;
+       bool temperature_change;
+       bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+       acpi_handle handle;
+
+       struct amdgpu_atif_notifications notifications;
+       struct amdgpu_atif_functions functions;
+       struct amdgpu_atif_notification_cfg notification_cfg;
+       struct amdgpu_encoder *encoder_for_bl;
+};
+
 /* Call the ATIF method
  */
 /**
@@ -46,8 +85,9 @@
  * Executes the requested ATIF function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
-               struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+                                          int function,
+                                          struct acpi_buffer *params)
 {
        acpi_status status;
        union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
                atif_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+       status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+                                     &buffer);
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atif_verify_interface(acpi_handle handle,
-               struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
        struct atif_verify_interface output;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
        if (!info)
                return -EIO;
 
@@ -176,6 +216,35 @@ out:
        return err;
 }
 
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+       acpi_handle handle = NULL;
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+       acpi_status status;
+
+       /* On PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on
+        * dGPU-only systems, ATIF is in the dGPU's namespace.
+        */
+       status = acpi_get_handle(dhandle, "ATIF", &handle);
+       if (ACPI_SUCCESS(status))
+               goto out;
+
+       if (amdgpu_has_atpx()) {
+               status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+                                        &handle);
+               if (ACPI_SUCCESS(status))
+                       goto out;
+       }
+
+       DRM_DEBUG_DRIVER("No ATIF handle found\n");
+       return NULL;
+out:
+       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+       DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+       return handle;
+}
+
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
@@ -188,15 +257,16 @@ out:
  * where n is specified in the result if a notifier is used.
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
-               struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
+       struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
        struct atif_system_params params;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+                               NULL);
        if (!info) {
                err = -EIO;
                goto out;
@@ -250,14 +320,15 @@ out:
  * (all asics).
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
-               struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+                                         struct atif_sbios_requests *req)
 {
        union acpi_object *info;
        size_t size;
        int count = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+                               NULL);
        if (!info)
                return -EIO;
 
@@ -290,11 +361,10 @@ out:
  * Returns NOTIFY code
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
-                       struct acpi_bus_event *event)
+                              struct acpi_bus_event *event)
 {
-       struct amdgpu_atif *atif = &adev->atif;
+       struct amdgpu_atif *atif = adev->atif;
        struct atif_sbios_requests req;
-       acpi_handle handle;
        int count;
 
        DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;
 
-       if (!atif->notification_cfg.enabled ||
+       if (!atif ||
+           !atif->notification_cfg.enabled ||
            event->type != atif->notification_cfg.command_code)
                /* Not our event */
                return NOTIFY_DONE;
 
        /* Check pending SBIOS requests */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-       count = amdgpu_atif_get_sbios_requests(handle, &req);
+       count = amdgpu_atif_get_sbios_requests(atif, &req);
 
        if (count <= 0)
                return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-       acpi_handle handle;
-       struct amdgpu_atif *atif = &adev->atif;
+       acpi_handle handle, atif_handle;
+       struct amdgpu_atif *atif;
        struct amdgpu_atcs *atcs = &adev->atcs;
        int ret;
 
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
                DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
        }
 
+       /* Probe for ATIF, and initialize it if found */
+       atif_handle = amdgpu_atif_probe_handle(handle);
+       if (!atif_handle)
+               goto out;
+
+       atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+       if (!atif) {
+               DRM_WARN("Not enough memory to initialize ATIF\n");
+               goto out;
+       }
+       atif->handle = atif_handle;
+
        /* Call the ATIF method */
-       ret = amdgpu_atif_verify_interface(handle, atif);
+       ret = amdgpu_atif_verify_interface(atif);
        if (ret) {
                DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+               kfree(atif);
                goto out;
        }
+       adev->atif = atif;
 
        if (atif->notifications.brightness_change) {
                struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        }
 
        if (atif->functions.system_params) {
-               ret = amdgpu_atif_get_notification_params(handle,
-                               &atif->notification_cfg);
+               ret = amdgpu_atif_get_notification_params(atif);
                if (ret) {
                        DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
                                        ret);
@@ -720,4 +803,6 @@ out:
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
        unregister_acpi_notifier(&adev->acpi_nb);
+       kfree(adev->atif);
 }
index daa06e7c5bb73e2d4073fad2177bf50eee0be006..ca8bf1c9a98e18a45ff1f07148bec1b1dd6aafc2 100644 (file)
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
        return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
 
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+       return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
 };
 
index 82312a7bc6ad5b5a01e232b350f23ac889f47ed7..9c85a90be29375a9bedadd04ee841bd0ae1c5e33 100644 (file)
@@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                r = amdgpu_bo_vm_update_pte(p);
                if (r)
                        return r;
+
+               r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+               if (r)
+                       return r;
        }
 
        return amdgpu_cs_sync_rings(p);
index 3317d1536f4fc352247756e3c650d72c9236916b..2c5f093e79e369db4900f6b64ddcc107956d3964 100644 (file)
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
        case CHIP_BONAIRE:
-       case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
+               /*
+                * We have systems in the wild with these ASICs that require
+                * LVDS and VGA support which is not supported with DC.
+                *
+                * Fall back to the non-DC driver here by default so as not to
+                * cause regressions.
+                */
+               return amdgpu_dc > 0;
+       case CHIP_HAWAII:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS10:
@@ -2739,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                return r;
 
+       /* Make sure IB tests flushed */
+       flush_delayed_work(&adev->late_init_work);
+
        /* blat the mode back in */
        if (fbcon) {
                if (!amdgpu_device_has_dc_support(adev)) {
index 39ec6b8890a1bf200053900b7998e5f33d703a32..e74d620d9699f8a54c273b8b233c81f3efa40f9c 100644 (file)
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;
 
-       if (ring != &adev->uvd.inst[ring->me].ring) {
+       if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
index f70eeed9ed76fa893dabe2218c4c85c4b4aec104..7aaa263ad8c7e0873d46f46cfc81b37b544a9d40 100644 (file)
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
+       /* wrap the last IB with fence */
+       if (job && job->uf_addr) {
+               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+       }
+
        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);
 
-       /* wrap the last IB with fence */
-       if (job && job->uf_addr) {
-               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-       }
-
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
index 5e4e1bd9038379fe62666e44318162adfc544fea..3526efa8960e3de2042f944db341dc36bc5bddb2 100644 (file)
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size += amdgpu_bo_size(bo);
+               adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
-       for (i = 0; i < bo->placement.num_placement; i++) {
-               bo->placements[i].lpfn = 0;
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r)) {
-               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-               goto error;
-       }
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size -= amdgpu_bo_size(bo);
+               adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }
 
-error:
+       for (i = 0; i < bo->placement.num_placement; i++) {
+               bo->placements[i].lpfn = 0;
+               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+       }
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (unlikely(r))
+               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
        return r;
 }
 
index b455da4877829e57b76178ed2300959c4dade7f4..fc818b4d849cd70a05948b457a115e09ddfe42d0 100644 (file)
@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
-                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
                        adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
                        adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
                        /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
index e969c879d87e66c686c0345839da07b39391e3e2..e5da4654b630dd7030704496ee5260e7676324ec 100644 (file)
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
index bcf68f80bbf058b9cfb8f7a1239f82259f95774b..3ff08e326838f381a91d542e3e2ee7484b46c5b3 100644 (file)
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        unsigned version_major, version_minor, family_id;
        int i, j, r;
 
-       INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+       INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        void *ptr;
        int i, j;
 
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                if (adev->uvd.inst[j].vcpu_bo == NULL)
                        continue;
 
-               cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
                /* only valid for physical mode */
                if (adev->asic_type < CHIP_POLARIS10) {
                        for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+               container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = 0, i, j;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                                                               AMD_CG_STATE_GATE);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
        if (amdgpu_sriov_vf(adev))
                return;
 
-       set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+       set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
        if (!amdgpu_sriov_vf(ring->adev))
-               schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
index b1579fba134c189777d59242d4f9e2dcd97a8378..8b23a1b00c76c95cf2b12c8aff1440415b059560 100644 (file)
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
        void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
-       struct delayed_work     idle_work;
        struct amdgpu_ring      ring;
        struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
        struct amdgpu_irq_src   irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
        bool                    address_64_bit;
        bool                    use_ctx_buf;
        struct amdgpu_uvd_inst          inst[AMDGPU_MAX_UVD_INSTANCES];
+       struct delayed_work     idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
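The hunks above turn the UVD idle handler into a single per-device delayed work instead of one per instance, so suspend cancels exactly one work item no matter how many instances exist. A minimal sketch of that shared-work shape, with hypothetical my_dev/my_inst names standing in for the amdgpu structures:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define MY_MAX_INST 2

struct my_inst {
	bool busy;
};

struct my_dev {
	struct my_inst inst[MY_MAX_INST];
	struct delayed_work idle_work;	/* one work services all instances */
};

static void my_idle_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, idle_work.work);
	int i;

	for (i = 0; i < MY_MAX_INST; i++)
		if (dev->inst[i].busy)
			return;	/* any busy instance keeps clocks ungated */
	/* ... gate clocks here ... */
}

static void my_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->idle_work, my_idle_handler);
}

static void my_suspend(struct my_dev *dev)
{
	/* one cancel covers every instance */
	cancel_delayed_work_sync(&dev->idle_work);
}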
index 127e87b470ff4da368c8c1feb0576f0b6cb0c62c..1b4ad9b2a7550189d45f4ac86b7e73bc7cc5966b 100644 (file)
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned char fw_check;
        int r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
-       family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
 
+       /* Bits 20-23 hold the encode major version and are non-zero under the
+        * new naming convention. Under the old naming convention this field is
+        * part of the version minor and DRM_DISABLED_FLAG. Since the latest
+        * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
+        * convention, this field has always been zero so far; these four bits
+        * therefore tell the two naming conventions apart.
+        */
+       fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+       if (fw_check) {
+               unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+               fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+               enc_major = fw_check;
+               dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+               vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+               DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+                       enc_major, enc_minor, dec_ver, vep, fw_rev);
+       } else {
+               unsigned int version_major, version_minor, family_id;
+
+               family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+       }
 
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
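The fw_check field above is what selects between the two encodings. A small self-contained decode of the same bit layout; the sample value is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

static void decode_vcn_version(uint32_t v)
{
	if ((v >> 20) & 0xf) {			/* non-zero: new convention */
		unsigned enc_major = (v >> 20) & 0xf;
		unsigned enc_minor = (v >> 12) & 0xff;
		unsigned dec_ver   = (v >> 24) & 0xf;
		unsigned vep       = (v >> 28) & 0xf;
		unsigned fw_rev    = v & 0xfff;

		printf("ENC %u.%u DEC %u VEP %u rev %u\n",
		       enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {				/* old convention */
		unsigned major  = (v >> 24) & 0xff;
		unsigned minor  = (v >> 8) & 0xff;
		unsigned family = v & 0xff;

		printf("%u.%u family %u\n", major, minor, family);
	}
}

int main(void)
{
	decode_vcn_version(0x11234567);	/* hypothetical ucode_version */
	return 0;
}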
index b0eb2f537392d192d84d3884bd685f7084e9e220..fdcb498f6d194b42d386940604557cfc90d2680c 100644 (file)
@@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                return;
        list_add_tail(&base->bo_list, &bo->va);
 
+       if (bo->tbo.type == ttm_bo_type_kernel)
+               list_move(&base->vm_status, &vm->relocated);
+
        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;
 
@@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                        pt->parent = amdgpu_bo_ref(parent->base.bo);
 
                        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-                       list_move(&entry->base.vm_status, &vm->relocated);
                }
 
                if (level < AMDGPU_VM_PTB) {
@@ -1463,7 +1465,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        uint64_t count;
 
                        max_entries = min(max_entries, 16ull * 1024ull);
-                       for (count = 1; count < max_entries; ++count) {
+                       for (count = 1;
+                            count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                            ++count) {
                                uint64_t idx = pfn + count;
 
                                if (pages_addr[idx] !=
@@ -1476,7 +1480,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count;
+                               max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1495,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               pfn += last - start + 1;
+               pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
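The new divisions keep pfn and count in units of CPU pages while max_entries stays in GPU page-table entries. On a kernel with 64 KiB pages and the usual 4 KiB AMDGPU_GPU_PAGE_SIZE the ratio is 16: one contiguous CPU page backs 16 GPU PTEs, so a run of count CPU pages becomes count * 16 entries and the pfn advance shrinks by the same factor. On x86-64 both page sizes are 4 KiB, the ratio is 1, and every new factor degenerates to the old behaviour, which is presumably why the problem only showed up on large-page architectures.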
index 9aca653bec07714874297e327eb950225f5ac555..b6333f92ba4565e9b5949f48e643f2d47daaa2a0 100644 (file)
@@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                adev->gmc.visible_vram_size : end) - start;
 }
 
+/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_mem_reg *mem = &bo->tbo.mem;
+       struct drm_mm_node *nodes = mem->mm_node;
+       unsigned pages = mem->num_pages;
+       u64 usage = 0;
+
+       if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+               return 0;
+
+       if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+               return amdgpu_bo_size(bo);
+
+       while (nodes && pages) {
+               usage += nodes->size << PAGE_SHIFT;
+               usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+               pages -= nodes->size;
+               ++nodes;
+       }
+
+       return usage;
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
 
-       nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+       nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+                              GFP_KERNEL | __GFP_ZERO);
        if (!nodes)
                return -ENOMEM;
 
@@ -190,7 +223,7 @@ error:
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
 
-       kfree(nodes);
+       kvfree(nodes);
        return r == -ENOSPC ? 0 : r;
 }
 
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);
 
-       kfree(mem->mm_node);
+       kvfree(mem->mm_node);
        mem->mm_node = NULL;
 }
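kcalloc needs physically contiguous slab memory and can fail for the very large node arrays a huge BO produces; kvmalloc_array tries kmalloc first and transparently falls back to vmalloc, and kvfree releases either kind. A minimal sketch of the pairing; my_alloc_nodes/my_free_nodes are hypothetical wrappers:

#include <linux/mm.h>
#include <drm/drm_mm.h>

static struct drm_mm_node *my_alloc_nodes(unsigned int num_nodes)
{
	/* zeroed; kmalloc first, vmalloc fallback for large counts */
	return kvmalloc_array(num_nodes, sizeof(struct drm_mm_node),
			      GFP_KERNEL | __GFP_ZERO);
}

static void my_free_nodes(struct drm_mm_node *nodes)
{
	kvfree(nodes);	/* correct for both kmalloc and vmalloc memory */
}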
 
index 0999c843f623ca37c0504a2a4c3151374c7fb854..a71b97519cc05ac9fc7f4e1884c3f4d61fa65a69 100644 (file)
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .emit_frame_size =
                4 + /* vce_v3_0_emit_pipeline_sync */
                6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
-       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
                6 + /* vce_v3_0_emit_vm_flush */
                4 + /* vce_v3_0_emit_pipeline_sync */
                6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
-       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
        .emit_ib = vce_v3_0_ring_emit_ib,
        .emit_vm_flush = vce_v3_0_emit_vm_flush,
        .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
index f9add85157e7355432aab9d0d14728f8906774f9..770c6b24be0b6b15d607e5008b864f5b4ca2bf6c 100644 (file)
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+
+       timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+                                               const struct drm_display_info *info)
+{
+       int normalized_clk;
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+       do {
+               normalized_clk = timing_out->pix_clk_khz;
+               /* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
+               if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+                       normalized_clk /= 2;
+               /* Adjust the pixel clock per the HDMI spec for the colour depth */
+               switch (timing_out->display_color_depth) {
+               case COLOR_DEPTH_101010:
+                       normalized_clk = (normalized_clk * 30) / 24;
+                       break;
+               case COLOR_DEPTH_121212:
+                       normalized_clk = (normalized_clk * 36) / 24;
+                       break;
+               case COLOR_DEPTH_161616:
+                       normalized_clk = (normalized_clk * 48) / 24;
+                       break;
+               default:
+                       return;
+               }
+               if (normalized_clk <= info->max_tmds_clock)
+                       return;
+               reduce_mode_colour_depth(timing_out);
+
+       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
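The helper steps the depth down until normalized_clk fits max_tmds_clock (kHz in drm_display_info). Worked through with invented numbers: a 297000 kHz mode carried as YCbCr 4:2:0 halves to 148500 kHz, and at 10 bpc inflates by 30/24 to 185625 kHz; against a 165000 kHz sink limit that fails, so the depth drops to 8 bpc and the loop exits. A standalone arithmetic check:

#include <stdio.h>

int main(void)
{
	int pix_clk_khz = 297000;	/* hypothetical 4K mode */
	int max_tmds_khz = 165000;	/* hypothetical sink limit */
	int clk = pix_clk_khz / 2;	/* YCbCr 4:2:0 halves the rate */

	clk = clk * 30 / 24;		/* 10 bpc costs 30/24 */
	printf("10 bpc needs %d kHz, limit %d kHz -> %s\n", clk, max_tmds_khz,
	       clk <= max_tmds_khz ? "keep" : "reduce depth");
	return 0;
}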
 /*****************************************************************************/
 
 static void
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
                                             const struct drm_connector *connector)
 {
        struct dc_crtc_timing *timing_out = &stream->timing;
+       const struct drm_display_info *info = &connector->display_info;
 
        memset(timing_out, 0, sizeof(struct dc_crtc_timing));
 
@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */
-
-       if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+       if (drm_mode_is_420_only(info, mode_in)
+                       && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+       else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+       if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               adjust_colour_depth_from_display_info(timing_out, info);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
        if (acrtc->base.state->event)
                prepare_flip_isr(acrtc);
 
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
        surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
        surface_updates->flip_addr = &addr;
 
-
        dc_commit_updates_for_stream(adev->dm.dc,
                                             surface_updates,
                                             1,
@@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                         __func__,
                         addr.address.grph.addr.high_part,
                         addr.address.grph.addr.low_part);
-
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 /*
@@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       int crtc_disable_count = 0;
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
@@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                bool modeset_needed;
 
+               if (old_crtc_state->active && !new_crtc_state->active)
+                       crtc_disable_count++;
+
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                modeset_needed = modeset_required(
@@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore
         */
+       for (i = 0; i < crtc_disable_count; i++)
+               pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (old_crtc_state->active && !new_crtc_state->active)
-                       pm_runtime_put_autosuspend(dev->dev);
-       }
 }
 
 
index 4304d9e408b88d180eabac07327497fdda353b25..ace9ad578ca08f85aeccf49ac4d744b071444274 100644 (file)
@@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
        enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
                I2C_MOT_TRUE : I2C_MOT_FALSE;
        enum ddc_result res;
-       uint32_t read_bytes = msg->size;
+       ssize_t read_bytes;
 
        if (WARN_ON(msg->size > 16))
                return -E2BIG;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_READ:
-               res = dal_ddc_service_read_dpcd_data(
+               read_bytes = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                false,
                                I2C_MOT_UNDEF,
                                msg->address,
                                msg->buffer,
-                               msg->size,
-                               &read_bytes);
-               break;
+                               msg->size);
+               return read_bytes;
        case DP_AUX_NATIVE_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                msg->size);
                break;
        case DP_AUX_I2C_READ:
-               res = dal_ddc_service_read_dpcd_data(
+               read_bytes = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                true,
                                mot,
                                msg->address,
                                msg->buffer,
-                               msg->size,
-                               &read_bytes);
-               break;
+                               msg->size);
+               return read_bytes;
        case DP_AUX_I2C_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                 r == DDC_RESULT_SUCESSFULL);
 #endif
 
-       if (res != DDC_RESULT_SUCESSFULL)
-               return -EIO;
-       return read_bytes;
+       return msg->size;
 }
 
 static enum drm_connector_status
index 5a3346124a0177da27c6d205559a2f363f5aa40d..5a2e952c5bead295df49350289416d4674c4dcef 100644 (file)
@@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency(
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 
        for (i = 0; i < clk_level_info->num_levels; i++) {
-               DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
-               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+               DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+               /* translate 10kHz to kHz */
+               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
                clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
        }
 }
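For example, a PPLib level reported as 60000 (10 kHz units, i.e. 600 MHz) now reaches DC as 600000 kHz; without the * 10, every level would arrive an order of magnitude too low.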
index ae48d603ebd6ca73c289c71f50795f5d3bd6f65e..49c2face1e7a869e07e94da881e7ed33acbe483e 100644 (file)
@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
        return ret;
 }
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
        struct ddc_service *ddc,
        bool i2c,
        enum i2c_mot_mode mot,
        uint32_t address,
        uint8_t *data,
-       uint32_t len,
-       uint32_t *read)
+       uint32_t len)
 {
        struct aux_payload read_payload = {
                .i2c_over_aux = i2c,
@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
                .mot = mot
        };
 
-       *read = 0;
-
        if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
                BREAK_TO_DEBUGGER();
                return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
                ddc->ctx->i2caux,
                ddc->ddc_pin,
                &command)) {
-               *read = command.payloads->length;
-               return DDC_RESULT_SUCESSFULL;
+               return (ssize_t)command.payloads->length;
        }
 
        return DDC_RESULT_FAILED_OPERATION;
index 7857cb42b3e62e5603af2408128e4cefb6531a0c..bdd121485cbcd661c4b317ad5f9816ae5c1d127b 100644 (file)
@@ -1767,12 +1767,10 @@ static void dp_test_send_link_training(struct dc_link *link)
        dp_retrain_link_dp_test(link, &link_settings, false);
 }
 
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
  * (toggling on and off) with debugger break
  * This causes intermittent PHY automation failure
  * Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
 static void dp_test_send_phy_test_pattern(struct dc_link *link)
 {
        union phy_test_pattern dpcd_test_pattern;
@@ -1832,13 +1830,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
                break;
        case PHY_TEST_PATTERN_CP2520_1:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
        case PHY_TEST_PATTERN_CP2520_2:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
index 9cfde0ccf4e9d44fb241ec6536d5543856af9924..53c71296f3dd2c3d6eb70bd068ac760e423f4322 100644 (file)
@@ -76,6 +76,7 @@ struct dc_caps {
        bool is_apu;
        bool dual_link_dvi;
        bool post_blend_color_processing;
+       bool force_dp_tps4_for_cp2520;
 };
 
 struct dc_dcc_surface_param {
index b235a75355b855e03cfd679126e72a9e30dd7d00..bae752332a9f7f2baea5d59f9c367d5b5b61a56e 100644 (file)
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
        .mem_input_is_flip_pending = dce_mi_is_flip_pending
 };
 
+static struct mem_input_funcs dce112_mi_funcs = {
+       .mem_input_program_display_marks = dce112_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static struct mem_input_funcs dce120_mi_funcs = {
+       .mem_input_program_display_marks = dce120_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
 
 void dce_mem_input_construct(
        struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+       dce_mi->base.funcs = &dce112_mi_funcs;
 }
 
 void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+       dce_mi->base.funcs = &dce120_mi_funcs;
 }
index 38ec0d609297f832362d45397a2080918e4937c0..344dd2e69e7ceb5177aa2d0762d63862531af31f 100644 (file)
@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
        struct dc  *dc,
        struct dc_state *context)
 {
-       /* TODO implement when needed but for now hardcode max value*/
-       context->bw.dce.dispclk_khz = 681000;
-       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       int i;
+       bool at_least_one_pipe = false;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].stream)
+                       at_least_one_pipe = true;
+       }
+
+       if (at_least_one_pipe) {
+               /* TODO implement when needed but for now hardcode max value */
+               context->bw.dce.dispclk_khz = 681000;
+               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       } else {
+               context->bw.dce.dispclk_khz = 0;
+               context->bw.dce.yclk_khz = 0;
+       }
 
        return true;
 }
index df5cb2d1d1645ebab3f6f4e79469d037a395798d..34dac84066a0bc32e16aecaa5567d99d1d27ffd8 100644 (file)
@@ -1027,6 +1027,8 @@ static bool construct(
        dc->caps.max_slave_planes = 1;
        dc->caps.is_apu = true;
        dc->caps.post_blend_color_processing = false;
+       /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+       dc->caps.force_dp_tps4_for_cp2520 = true;
 
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
index 30b3a08b91be27dade29620cfc5dbf857c89bad1..090b7a8dd67bde2bdfdaf243c04d175abf82112c 100644 (file)
@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
                uint8_t *read_buf,
                uint32_t read_size);
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
                struct ddc_service *ddc,
                bool i2c,
                enum i2c_mot_mode mot,
                uint32_t address,
                uint8_t *data,
-               uint32_t len,
-               uint32_t *read);
+               uint32_t len);
 
 enum ddc_result dal_ddc_service_write_dpcd_data(
                struct ddc_service *ddc,
index 092d800b703a7627a2b98fdda7be54b5b6f7ff11..33b4de4ad66eb561f15c04cac555c2ea76b03d2b 100644 (file)
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
 
-       uint32_t boardreserved[10];
+       uint8_t Vr2_I2C_address;
+       uint8_t padding_vr2[3];
+
+       uint32_t boardreserved[9];
 };
 
 /* 
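The layout stays binary-compatible: the new uint8_t plus three padding bytes (4 bytes) together with boardreserved[9] (36 bytes) occupy the same 40 bytes that boardreserved[10] did, so the offsets of the members that follow are unchanged.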
index 5325661fedffb9480b26fb7b94269a30fb32334a..d27c1c9df2868696887157845aa13eca5b7348d1 100644 (file)
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
        return 0;
 }
 
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_2 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_1 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
 int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                        struct pp_atomfwctrl_bios_boot_up_values *boot_values)
 {
-       struct atom_firmware_info_v3_1 *info = NULL;
+       struct atom_firmware_info_v3_2 *fwinfo_3_2;
+       struct atom_firmware_info_v3_1 *fwinfo_3_1;
+       struct atom_common_table_header *info = NULL;
        uint16_t ix;
 
        ix = GetIndexIntoMasterDataTable(firmwareinfo);
-       info = (struct atom_firmware_info_v3_1 *)
+       info = (struct atom_common_table_header *)
                smu_atom_get_data_table(hwmgr->adev,
                                ix, NULL, NULL, NULL);
 
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                return -EINVAL;
        }
 
-       boot_values->ulRevision = info->firmware_revision;
-       boot_values->ulGfxClk   = info->bootup_sclk_in10khz;
-       boot_values->ulUClk     = info->bootup_mclk_in10khz;
-       boot_values->usVddc     = info->bootup_vddc_mv;
-       boot_values->usVddci    = info->bootup_vddci_mv;
-       boot_values->usMvddc    = info->bootup_mvddc_mv;
-       boot_values->usVddGfx   = info->bootup_vddgfx_mv;
-       boot_values->ucCoolingID = info->coolingsolution_id;
-       boot_values->ulSocClk   = 0;
-       boot_values->ulDCEFClk   = 0;
+       if ((info->format_revision == 3) && (info->content_revision == 2)) {
+               fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+                               boot_values, fwinfo_3_2);
+       } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+               fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+                               boot_values, fwinfo_3_1);
+       } else {
+               pr_info("Fw info table revision does not match!\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
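The lookup now casts to the common header first and only downcasts once the format/content revision is known. A self-contained sketch of that dispatch shape; the struct names are illustrative, not the real atombios ones:

#include <stdint.h>
#include <stdio.h>

struct hdr { uint8_t format_rev, content_rev; };
struct info_v3_1 { struct hdr h; uint32_t boot_clk; };
struct info_v3_2 { struct hdr h; uint32_t boot_clk; uint32_t extra; };

static int parse(const struct hdr *h)
{
	if (h->format_rev == 3 && h->content_rev == 2) {
		const struct info_v3_2 *i = (const void *)h;

		printf("v3.2 clk %u extra %u\n", i->boot_clk, i->extra);
	} else if (h->format_rev == 3 && h->content_rev == 1) {
		const struct info_v3_1 *i = (const void *)h;

		printf("v3.1 clk %u\n", i->boot_clk);
	} else {
		return -1;	/* unknown revision: refuse rather than guess */
	}
	return 0;
}

int main(void)
{
	struct info_v3_2 fw = { { 3, 2 }, 48000, 7 };

	return parse(&fw.h) ? 1 : 0;
}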
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
        param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
        param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
 
+       param->Vr2_I2C_address = info->Vr2_I2C_address;
+
        return 0;
 }
index fe10aa4db5e64f721fbd462ffa6127b0e06851fc..22e21668c93a429239688fd7509328fc0d870406 100644 (file)
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
        uint32_t   ulUClk;
        uint32_t   ulSocClk;
        uint32_t   ulDCEFClk;
+       uint32_t   ulEClk;
+       uint32_t   ulVClk;
+       uint32_t   ulDClk;
        uint16_t   usVddc;
        uint16_t   usVddci;
        uint16_t   usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
        uint8_t  acggfxclkspreadenabled;
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
+
+       uint8_t Vr2_I2C_address;
 };
 
 int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
index dbe4b1f66784961ea028b3fcfee564e830617f80..22364875a943e5e32e7e13d5fe2ef79d579f2824 100644 (file)
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
-       int result;
+       int result = 0;
        uint32_t num_se = 0;
        uint32_t count, data;
 
index 782e2098824df6225e2c044bf060626d143a0919..c98e5de777cd1bc18abbc4a5560430543f23ddd6 100644 (file)
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 
        data->registry_data.disallowed_features = 0x0;
        data->registry_data.od_state_in_dc_support = 0;
+       data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
 
        data->registry_data.log_avfs_param = 0;
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
                data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
                data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
                data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+               data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+               data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+               data->vbios_boot_state.vclock = boot_up_values.ulVClk;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
                        (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
index e81ded1ec1982d55f2b1a5657ee0fc163b3d7c96..49b38df8c7f2702553b7aa49e27e77069d55adef 100644 (file)
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
        uint32_t    mem_clock;
        uint32_t    soc_clock;
        uint32_t    dcef_clock;
+       uint32_t    eclock;
+       uint32_t    dclock;
+       uint32_t    vclock;
 };
 
 #define DPMTABLE_OD_UPDATE_SCLK     0x00000001
index 888ddca902d894216acee566879f239a47009468..29914700ee82f5d8d09de71ff8fadb7b28bfaca3 100644 (file)
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
                ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
        }
 
+       ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
        return 0;
 }
 
index 2f8a3b983cce0c9444cac949ed7ce79a420b06ba..b08526fd161908d29bda5ab87dc7bd1c1f1401f1 100644 (file)
@@ -499,7 +499,10 @@ typedef struct {
        uint8_t      AcgGfxclkSpreadPercent;
        uint16_t     AcgGfxclkSpreadFreq;
 
-       uint32_t     BoardReserved[10];
+  uint8_t      Vr2_I2C_address;
+  uint8_t      padding_vr2[3];
+
+  uint32_t     BoardReserved[9];
 
 
   uint32_t     MmHubPadding[7];
index d644a9bb9078d081639aa09a72af9f92e3612fc2..9f407c48d4f0d4775c6c3dd8943591c19e22331e 100644 (file)
@@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        uint32_t fw_to_load;
        int result = 0;
        struct SMU_DRAMData_TOC *toc;
+       uint32_t num_entries = 0;
 
        if (!hwmgr->reload_fw) {
                pr_info("skip reloading...\n");
@@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        }
 
        toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-       toc->num_entries = 0;
        toc->structure_version = 1;
 
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_RLC_G, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_CE, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_ME, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_SDMA0, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_SDMA1, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        if (!hwmgr->not_vf)
                PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
 
+       toc->num_entries = num_entries;
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
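Indexing through a stack-local num_entries and storing toc->num_entries once at the end means each entry no longer does a read-modify-write through the TOC header buffer, which lives in device-visible (and typically uncached) memory; it also guarantees the SMC can never observe a half-incremented count. That reading of the motivation is inferred from the code, not stated in it.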
 
index 8d20faa198cf199ff65d604ca9f4f1effd633c86..0a788d76ed5f02f6aed755447783874280b5cf7a 100644 (file)
@@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm)
 
 static void malidp_fini(struct drm_device *drm)
 {
-       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
 }
 
@@ -646,6 +645,7 @@ vblank_fail:
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
 irq_init_fail:
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
 bind_fail:
        of_node_put(malidp->crtc.port);
@@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev)
        malidp_se_irq_fini(drm);
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
        of_node_put(malidp->crtc.port);
        malidp->crtc.port = NULL;
index d789b46dc817335dd2d509ee456d850897762b88..069783e715f1777829b3d992b2add45b8fb693da 100644 (file)
@@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+                                           MALIDP500_SE_IRQ_GLOBAL,
                                .vsync_irq = 0,
                        },
                        .dc_irq_map = {
index 7a44897c50fea784bf516db7f17866ee31f413fa..29409a65d864760e674f787cb5279cdbff5b91a7 100644 (file)
@@ -23,6 +23,7 @@
 
 /* Layer specific register offsets */
 #define MALIDP_LAYER_FORMAT            0x000
+#define   LAYER_FORMAT_MASK            0x3f
 #define MALIDP_LAYER_CONTROL           0x004
 #define   LAYER_ENABLE                 (1 << 0)
 #define   LAYER_FLOWCFG_MASK           7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
-                                                    state->crtc_w,
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+                                                    state->crtc_h,
                                                     fb->format->format);
                if (val < 0)
                        return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        dest_w = plane->state->crtc_w;
        dest_h = plane->state->crtc_h;
 
-       malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
+       val = malidp_hw_read(mp->hwdev, mp->layer->base);
+       val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+       malidp_hw_write(mp->hwdev, val, mp->layer->base);
 
        for (i = 0; i < ms->n_planes; i++) {
                /* calculate the offset for the layer's plane registers */
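Writing ms->format directly would clobber whatever else lives in the layer format register; the read-modify-write above masks only the format bits. The generic shape, with hypothetical mmio accessors:

#include <stdint.h>

#define LAYER_FORMAT_MASK 0x3f

static uint32_t mmio_read(volatile uint32_t *reg)  { return *reg; }
static void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

static void set_layer_format(volatile uint32_t *reg, uint32_t fmt)
{
	uint32_t val = mmio_read(reg);

	val &= ~LAYER_FORMAT_MASK;		/* keep unrelated bits */
	val |= fmt & LAYER_FORMAT_MASK;		/* install new format */
	mmio_write(reg, val);
}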
index 03eeee11dd5bd2f4ed000b2f86ca5eda6253f9ca..42a40daff13265c968f442b4dafd2ec081bbafd7 100644 (file)
@@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
        u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
 
        /*
-        * This is rediculous - rather than writing bits to clear, we
-        * have to set the actual status register value.  This is racy.
+        * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+        * is set.  Writing has some other effect to acknowledge the IRQ -
+        * without this, we only get a single IRQ.
         */
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
@@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
 static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
        return 0;
 }
 
 static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 }
 
 static const struct drm_crtc_funcs armada_crtc_funcs = {
@@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
        writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
        writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
        ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
index 27319a8335e258cf12cb093c5c598523fbe307f3..345dc4d0851ef43bd73069679c3ec7fef8bfa073 100644 (file)
@@ -160,6 +160,7 @@ enum {
        CFG_ALPHAM_GRA          = 0x1 << 16,
        CFG_ALPHAM_CFG          = 0x2 << 16,
        CFG_ALPHA_MASK          = 0xff << 8,
+#define CFG_ALPHA(x)           ((x) << 8)
        CFG_PIXCMD_MASK         = 0xff,
 };
 
index c391955009d6051a6bb67b8a9a7bcebee38a8983..afa7ded3ae31df68a406da0bba045a6d87f0bce6 100644 (file)
@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
        uint16_t contrast;
        uint16_t saturation;
        uint32_t colorkey_mode;
+       uint32_t colorkey_enable;
 };
 
 struct armada_ovl_plane {
@@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
        writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
 
        spin_lock_irq(&dcrtc->irq_lock);
-       armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
-                    CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
-                    dcrtc->base + LCD_SPU_DMA_CTRL1);
-
-       armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+       armada_updatel(prop->colorkey_mode,
+                      CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+                      dcrtc->base + LCD_SPU_DMA_CTRL1);
+       if (dcrtc->variant->has_spu_adv_reg)
+               armada_updatel(prop->colorkey_enable,
+                              ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+                              dcrtc->base + LCD_SPU_ADV_REG);
        spin_unlock_irq(&dcrtc->irq_lock);
 }
 
@@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
                dplane->prop.colorkey_vb |= K2B(val);
                update_attr = true;
        } else if (property == priv->colorkey_mode_prop) {
-               dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-               dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+               if (val == CKMODE_DISABLE) {
+                       dplane->prop.colorkey_mode =
+                               CFG_CKMODE(CKMODE_DISABLE) |
+                               CFG_ALPHAM_CFG | CFG_ALPHA(255);
+                       dplane->prop.colorkey_enable = 0;
+               } else {
+                       dplane->prop.colorkey_mode =
+                               CFG_CKMODE(val) |
+                               CFG_ALPHAM_GRA | CFG_ALPHA(0);
+                       dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+               }
                update_attr = true;
        } else if (property == priv->brightness_prop) {
                dplane->prop.brightness = val - 256;
@@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
        dplane->prop.colorkey_yr = 0xfefefe00;
        dplane->prop.colorkey_ug = 0x01010100;
        dplane->prop.colorkey_vb = 0x01010100;
-       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+                                    CFG_ALPHAM_GRA | CFG_ALPHA(0);
+       dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
        dplane->prop.brightness = 0;
        dplane->prop.contrast = 0x4000;
        dplane->prop.saturation = 0x4000;
index 73c875db45f4346afd5a25408e9264c466401138..47e0992f39083161d46c5d1759f0c06f5cf2c0de 100644 (file)
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
                        return ret;
        }
 
-       if (desc->layout.xstride && desc->layout.pstride) {
+       if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
                int ret;
 
                ret = drm_plane_create_rotation_property(&plane->base,
index 73021b388e12d3bc1ca7cabfc0178af223f77686..dd3ff2f2cdce00cb761ffe5be5bd15b0c915c38f 100644 (file)
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
        else
                status = connector_status_disconnected;
 
+       /*
+        * The bridge resets its registers on unplug. So when we get a plug
+        * event and we're already supposed to be powered, cycle the bridge to
+        * restore its state.
+        */
+       if (status == connector_status_connected &&
+           adv7511->connector.status == connector_status_disconnected &&
+           adv7511->powered) {
+               regcache_mark_dirty(adv7511->regmap);
+               adv7511_power_on(adv7511);
+       }
+
        if (adv7511->connector.status != status) {
                adv7511->connector.status = status;
                if (status == connector_status_disconnected)
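Marking the regmap cache dirty before powering back on is the standard recovery for a device whose registers reset behind the driver's back: every cached value is then rewritten on the next sync. A hedged sketch, assuming a device with a regmap-backed cache and a powered flag:

#include <linux/regmap.h>
#include <linux/types.h>

struct my_bridge {
	struct regmap *regmap;
	bool powered;
};

static void my_bridge_replug(struct my_bridge *b)
{
	if (!b->powered)
		return;

	/* the unplug wiped the registers; the cache still has old values */
	regcache_mark_dirty(b->regmap);
	regcache_sync(b->regmap);	/* rewrite every cached register */
}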
index 7ab36042a822cf6cfec2fc440ef3fbe6e018fa3d..a6e8f4591e636241c6f1e8515fea33dc9147a7f3 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
 
 #include <linux/clk.h>
 #include <linux/delay.h>
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
        CM_DISCONNECTED,
@@ -69,9 +73,7 @@ struct sii8620 {
        struct regulator_bulk_data supplies[2];
        struct mutex lock; /* context lock, protects fields below */
        int error;
-       int pixel_clock;
        unsigned int use_packed_pixel:1;
-       int video_code;
        enum sii8620_mode mode;
        enum sii8620_sink_type sink_type;
        u8 cbus_status;
@@ -79,7 +81,9 @@ struct sii8620 {
        u8 xstat[MHL_XDS_SIZE];
        u8 devcap[MHL_DCAP_SIZE];
        u8 xdevcap[MHL_XDC_SIZE];
-       u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+       bool feature_complete;
+       bool devcap_read;
+       bool sink_detected;
        struct edid *edid;
        unsigned int gen2_write_burst:1;
        enum sii8620_mt_state mt_state;
@@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
        }
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
        static const char * const sink_str[] = {
                [SINK_NONE] = "NONE",
@@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
        char sink_name[20];
        struct device *dev = ctx->dev;
 
-       if (ret < 0)
+       if (!ctx->sink_detected || !ctx->devcap_read)
                return;
 
        sii8620_fetch_edid(ctx);
@@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                sii8620_mhl_disconnected(ctx);
                return;
        }
+       sii8620_set_upstream_edid(ctx);
 
        if (drm_detect_hdmi_monitor(ctx->edid))
                ctx->sink_type = SINK_HDMI;
@@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                 sink_str[ctx->sink_type], sink_name);
 }
 
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-       if (!sii8620_is_mhl3(ctx))
-               return;
-
-       sii8620_write(ctx, REG_FCGC,
-               BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-       sii8620_setbits(ctx, REG_HRXCTRL3,
-               BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-       sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-       sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-       sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-       sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-       sii8620_write_seq_static(ctx,
-               REG_TDMLLCTL, 0,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-                       BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-               REG_HRXINTL, 0xff,
-               REG_HRXINTH, 0xff,
-               REG_TTXINTL, 0xff,
-               REG_TTXINTH, 0xff,
-               REG_TRXINTL, 0xff,
-               REG_TRXINTH, 0xff,
-               REG_HTXINTL, 0xff,
-               REG_HTXINTH, 0xff,
-               REG_FCINTR0, 0xff,
-               REG_FCINTR1, 0xff,
-               REG_FCINTR2, 0xff,
-               REG_FCINTR3, 0xff,
-               REG_FCINTR4, 0xff,
-               REG_FCINTR5, 0xff,
-               REG_FCINTR6, 0xff,
-               REG_FCINTR7, 0xff
-       );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-       if (ret < 0)
-               return;
-
-       sii8620_set_upstream_edid(ctx);
-       sii8620_hsic_init(ctx);
-       sii8620_enable_hpd(ctx);
-}
-
 static void sii8620_mr_devcap(struct sii8620 *ctx)
 {
        u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
                 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
                 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
        sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+       ctx->devcap_read = true;
+       sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
        u8 lm_ddc, ddc_cmd, int3, cbus;
+       unsigned long timeout;
        int fetched, i;
        int edid_len = EDID_LENGTH;
        u8 *edid;
@@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
                        REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
                );
 
-               do {
-                       int3 = sii8620_readb(ctx, REG_INTR3);
+               int3 = 0;
+               timeout = jiffies + msecs_to_jiffies(200);
+               for (;;) {
                        cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-                       if (int3 & BIT_DDC_CMD_DONE)
-                               break;
-
-                       if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+                       if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+                               kfree(edid);
+                               edid = NULL;
+                               goto end;
+                       }
+                       if (int3 & BIT_DDC_CMD_DONE) {
+                               if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+                                   >= FETCH_SIZE)
+                                       break;
+                       } else {
+                               int3 = sii8620_readb(ctx, REG_INTR3);
+                       }
+                       if (time_is_before_jiffies(timeout)) {
+                               ctx->error = -ETIMEDOUT;
+                               dev_err(ctx->dev, "timeout during EDID read\n");
                                kfree(edid);
                                edid = NULL;
                                goto end;
                        }
-               } while (1);
-
-               sii8620_readb(ctx, REG_DDC_STATUS);
-               while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
                        usleep_range(10, 20);
+               }
 
                sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
                if (fetched + FETCH_SIZE == EDID_LENGTH) {
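The rewritten wait is the standard jiffies-bounded poll: take a deadline once, test for completion and for fatal conditions on every pass, and give up with -ETIMEDOUT when the deadline passes instead of spinning forever. A condensed sketch, assuming a caller-supplied my_done predicate:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_until_done(bool (*my_done)(void *), void *ctx)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(200);

	for (;;) {
		if (my_done(ctx))
			return 0;
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;	/* deadline passed */
		usleep_range(10, 20);		/* be kind to the bus */
	}
}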
@@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
        ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
        if (ret)
                return ret;
+
        usleep_range(10000, 20000);
-       return clk_prepare_enable(ctx->clk_xtal);
+       ret = clk_prepare_enable(ctx->clk_xtal);
+       if (ret)
+               return ret;
+
+       msleep(100);
+       gpiod_set_value(ctx->gpio_reset, 0);
+       msleep(100);
+
+       return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
        return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 }
 
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       usleep_range(5000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 1);
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       msleep(300);
-}
-
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
        sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
                                BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
                                ctx->use_packed_pixel ? ~0 : 0);
        } else {
-               if (ctx->use_packed_pixel)
+               if (ctx->use_packed_pixel) {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, BIT_VID_MODE_M1080P,
                                REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
                                REG_MHLTX_CTL6, 0x60
                        );
-               else
+               } else {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, 0,
                                REG_MHL_TOP_CTL, 1,
                                REG_MHLTX_CTL6, 0xa0
                        );
+               }
        }
 
        if (ctx->use_packed_pixel)
-               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-                       BIT_TPI_OUTPUT_CSCMODE709;
+               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
        else
                out_fmt = VAL_TPI_FORMAT(RGB, FULL);
 
@@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
        return frm_len;
 }
 
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+                                  struct drm_display_mode *mode)
 {
        struct mhl3_infoframe mhl_frm;
        union hdmi_infoframe frm;
        u8 buf[31];
        int ret;
 
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+                                                      mode,
+                                                      true);
+       if (ctx->use_packed_pixel)
+               frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+       if (!ret)
+               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+       if (ret > 0)
+               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
        if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
                sii8620_write(ctx, REG_TPI_SC,
                        BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
-                       ARRAY_SIZE(ctx->avif) - 3);
                sii8620_write(ctx, REG_PKT_FILTER_0,
                        BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                        BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
                return;
        }
 
-       ret = hdmi_avi_infoframe_init(&frm.avi);
-       frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
-       frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
-       frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
-       frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
-       frm.avi.video_code = ctx->video_code;
-       if (!ret)
-               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
-       if (ret > 0)
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
        sii8620_write(ctx, REG_PKT_FILTER_0,
                BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                BIT_PKT_FILTER_0_DROP_MPEG_PKT |
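
Note: the AVI InfoFrame is now built from the live display mode instead of being replayed from a captured one. The write starts at buf + 3 because hdmi_avi_infoframe_pack() emits a 3-byte header (type, version, length) ahead of the checksum and payload, and the TPI block regenerates that header itself, so the register write begins at the checksum byte. A sketch of the pack-then-strip sequence (send_frame() is a placeholder for the register write):

        union hdmi_infoframe frm;
        u8 buf[31];
        int ret;

        ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, mode, true);
        if (!ret)
                ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
        if (ret > 3)
                send_frame(buf + 3, ret - 3);   /* skip the 3-byte header */
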
@@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
+       struct drm_display_mode *mode =
+               &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
        if (!sii8620_is_mhl3(ctx))
                sii8620_stop_video(ctx);
 
@@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
        sii8620_set_format(ctx);
 
        if (!sii8620_is_mhl3(ctx)) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                       MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+               u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+               if (ctx->use_packed_pixel)
+                       link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+               else
+                       link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
                sii8620_set_auto_zone(ctx);
        } else {
                static const struct {
@@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx)
                          MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
                };
                u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
-               int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+               int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
                int i;
 
-               for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+               for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
                        if (clk < clk_spec[i].max_clk)
                                break;
 
@@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
                        clk_spec[i].link_rate);
        }
 
-       sii8620_set_infoframes(ctx);
+       sii8620_set_infoframes(ctx, mode);
 }
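
Note: bounding the loop at ARRAY_SIZE(clk_spec) - 1 also fixes a latent out-of-bounds read: when no max_clk bound matches, i now lands on the last (highest-rate) table entry instead of one past the end. The clamp-to-last-entry lookup, in isolation:

        /* first entry whose bound fits; the last entry is the fallback,
         * so i is always a valid index afterwards
         */
        for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
                if (clk < clk_spec[i].max_clk)
                        break;
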
 
 static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
        );
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+       sii8620_disable_hpd(ctx);
+       ctx->sink_type = SINK_NONE;
+       ctx->sink_detected = false;
+       ctx->feature_complete = false;
+       kfree(ctx->edid);
+       ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
        sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
                REG_MHL_DP_CTL6, 0x2A,
                REG_MHL_DP_CTL7, 0x03
        );
-       sii8620_disable_hpd(ctx);
+       sii8620_hpd_unplugged(ctx);
        sii8620_write_seq_static(ctx,
                REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
                REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
        memset(ctx->xstat, 0, sizeof(ctx->xstat));
        memset(ctx->devcap, 0, sizeof(ctx->devcap));
        memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+       ctx->devcap_read = false;
        ctx->cbus_status = 0;
-       ctx->sink_type = SINK_NONE;
-       kfree(ctx->edid);
-       ctx->edid = NULL;
        sii8620_mt_cleanup(ctx);
 }
 
@@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
 {
-       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL
-                                     | MHL_DST_LM_PATH_ENABLED);
-               if (!sii8620_is_mhl3(ctx))
-                       sii8620_mt_read_devcap(ctx, false);
-               sii8620_mt_set_cont(ctx, sii8620_sink_detected);
-       } else {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL);
-       }
+       u8 link_mode;
+
+       if (ctx->use_packed_pixel)
+               link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+       else
+               link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+               link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+       sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+                             link_mode);
 }
 
 static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
        sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
        sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-       if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+       if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+           MHL_DST_CONN_DCAP_RDY) {
                sii8620_status_dcap_ready(ctx);
 
+               if (!sii8620_is_mhl3(ctx))
+                       sii8620_mt_read_devcap(ctx, false);
+       }
+
        if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
                sii8620_status_changed_path(ctx);
 }
@@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
        }
        if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
                sii8620_send_features(ctx);
-       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-               sii8620_edid_read(ctx, 0);
+       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+               ctx->feature_complete = true;
+               if (ctx->edid)
+                       sii8620_enable_hpd(ctx);
+       }
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
        if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
                sii8620_msc_mr_write_stat(ctx);
 
+       if (stat & BIT_CBUS_HPD_CHG) {
+               if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+                       ctx->sink_detected = true;
+                       sii8620_identify_sink(ctx);
+               } else {
+                       sii8620_hpd_unplugged(ctx);
+               }
+       }
+
        if (stat & BIT_CBUS_MSC_MR_SET_INT)
                sii8620_msc_mr_set_int(ctx);
 
@@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
                ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-       sii8620_write_seq_static(ctx,
-               REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-               REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-       );
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
        u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
        if (stat & BIT_INTR_SCDT_CHANGE) {
                u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-               if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-                       if (ctx->sink_type == SINK_HDMI)
-                               /* enable infoframe interrupt */
-                               sii8620_scdt_high(ctx);
-                       else
-                               sii8620_start_video(ctx);
-               }
+               if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+                       sii8620_start_video(ctx);
        }
 
        sii8620_write(ctx, REG_INTR5, stat);
 }
 
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-       u8 vsif[11];
-
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-                     VAL_RX_HDMI_CTRL2_DEFVAL |
-                     BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-                        ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-                        ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-       u8 stat = sii8620_readb(ctx, REG_INTR8)
-               & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-       sii8620_write(ctx, REG_INTR8, stat);
-
-       if (stat & BIT_CEA_NEW_VSI)
-               sii8620_new_vsi(ctx);
-
-       if (stat & BIT_CEA_NEW_AVI)
-               sii8620_new_avi(ctx);
-
-       if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-               sii8620_start_video(ctx);
-}
-
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
 {
        if (ret < 0)
@@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
 
        if (stat & BIT_DDC_CMD_DONE) {
                sii8620_write(ctx, REG_INTR3_MASK, 0);
-               if (sii8620_is_mhl3(ctx))
+               if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
                        sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
                                           MHL_INT_RC_FEAT_REQ);
                else
-                       sii8620_edid_read(ctx, 0);
+                       sii8620_enable_hpd(ctx);
        }
        sii8620_write(ctx, REG_INTR3, stat);
 }
@@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
                { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
                { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
                { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-               { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
        };
        struct sii8620 *ctx = data;
        u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
                dev_err(dev, "Error powering on, %d.\n", ret);
                return;
        }
-       sii8620_hw_reset(ctx);
 
        sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
        ret = sii8620_clear_error(ctx);
@@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
        rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+                                      const struct drm_display_mode *mode)
+{
+       int max_pclk, max_pclk_pp_mode;
+
+       if (sii8620_is_mhl3(ctx)) {
+               max_pclk = MHL3_MAX_PCLK;
+               max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+       } else {
+               max_pclk = MHL1_MAX_PCLK;
+               max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+       }
+
+       if (mode->clock < max_pclk)
+               return 0;
+       else if (mode->clock < max_pclk_pp_mode)
+               return 1;
+       else
+               return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
                                         const struct drm_display_mode *mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
+       int pack_required = sii8620_is_packing_required(ctx, mode);
        bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
                        MHL_DCAP_VID_LINK_PPIXEL;
-       unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-                                                      MHL1_MAX_LCLK;
-       max_pclk /= can_pack ? 2 : 3;
 
-       return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+       switch (pack_required) {
+       case 0:
+               return MODE_OK;
+       case 1:
+               return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+       default:
+               return MODE_CLOCK_HIGH;
+       }
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
                               struct drm_display_mode *adjusted_mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
-       int max_lclk;
-       bool ret = true;
 
        mutex_lock(&ctx->lock);
 
-       max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-       if (max_lclk > 3 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 0;
-               goto end;
-       }
-       if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-           max_lclk > 2 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 1;
-               goto end;
-       }
-       ret = false;
-end:
-       if (ret) {
-               u8 vic = drm_match_cea_mode(adjusted_mode);
-
-               if (!vic) {
-                       union hdmi_infoframe frm;
-                       u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-                       /* FIXME: We need the connector here */
-                       drm_hdmi_vendor_infoframe_from_display_mode(
-                               &frm.vendor.hdmi, NULL, adjusted_mode);
-                       vic = frm.vendor.hdmi.vic;
-                       if (vic >= ARRAY_SIZE(mhl_vic))
-                               vic = 0;
-                       vic = mhl_vic[vic];
-               }
-               ctx->video_code = vic;
-               ctx->pixel_clock = adjusted_mode->clock;
-       }
+       ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+
        mutex_unlock(&ctx->lock);
-       return ret;
+
+       return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
index 130da5195f3b622062c274507eb0727c76601473..81e32199d3ef4962751e2c010938b80ec8d29110 100644 (file)
@@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct drm_plane *plane = NULL;
+       struct drm_plane_state *old_plane_state = NULL;
+       struct drm_plane_state *new_plane_state = NULL;
        const struct drm_plane_helper_funcs *funcs;
        int i, n_planes = 0;
 
@@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
        if (n_planes != 1)
                return -EINVAL;
 
-       if (!new_plane_state->crtc)
+       if (!new_plane_state->crtc ||
+           old_plane_state->crtc != new_plane_state->crtc)
                return -EINVAL;
 
        funcs = plane->helper_private;
index 3c4000facb360a012f78b12599d918a276145ca6..f973d287696a6febd621f8bf1ff32d5fc64bdca2 100644 (file)
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
                ctx->handle = drm_legacy_ctxbitmap_next(dev);
        }
        DRM_DEBUG("%d\n", ctx->handle);
-       if (ctx->handle == -1) {
+       if (ctx->handle < 0) {
                DRM_DEBUG("Not enough free contexts.\n");
                /* Should this return -EBUSY instead? */
                return -ENOMEM;
index b553a6f2ff0eb27dec7ad0aaeeb891992fe19ab8..7af748ed1c58dddfae7cb578760be3344273901c 100644 (file)
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-       drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
-
        /*
         * After synchronizing any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);
+
+       drm_dev_unregister(dev);
+
+       mutex_lock(&drm_global_mutex);
+       if (dev->open_count == 0)
+               drm_dev_put(dev);
+       mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
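
Note: publishing ->unplugged and running synchronize_srcu() before drm_dev_unregister() guarantees that every SRCU read-side critical section observes the unplug before teardown begins. The read side is the usual drm_dev_enter()/drm_dev_exit() pair, roughly:

        int idx;

        if (!drm_dev_enter(dev, &idx))
                return -ENODEV;         /* device already unplugged */
        /* ... safe to access the device here ... */
        drm_dev_exit(idx);
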
 
index 50c73c0a20b92e808d1ab511d1d3336ab9a094df..b54fb78a283c642e8541370482c627ea9567dc8e 100644 (file)
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
        /* Clone the lessor file to create a new file for us */
        DRM_DEBUG_LEASE("Allocating lease file\n");
-       path_get(&lessor_file->f_path);
-       lessee_file = alloc_file(&lessor_file->f_path,
-                                lessor_file->f_mode,
-                                fops_get(lessor_file->f_inode->i_fop));
-
+       lessee_file = file_clone_open(lessor_file);
        if (IS_ERR(lessee_file)) {
                ret = PTR_ERR(lessee_file);
                goto out_lessee;
        }
 
-       /* Initialize the new file for DRM */
-       DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
-       ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
-       if (ret)
-               goto out_lessee_file;
-
        lessee_priv = lessee_file->private_data;
-
        /* Change the file to a master one */
        drm_master_put(&lessee_priv->master);
        lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
        return 0;
 
-out_lessee_file:
-       fput(lessee_file);
-
 out_lessee:
        drm_master_put(&lessee);
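
Note: file_clone_open() is a VFS helper new in this release; it duplicates an already-open struct file, replacing the hand-rolled path_get()/alloc_file()/->open() sequence and the extra error-unwind label it required. Typical use, as a sketch:

        struct file *nf = file_clone_open(filp);

        if (IS_ERR(nf))
                return PTR_ERR(nf);
        /* nf shares filp's path and mode and has been ->open()ed */
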
 
index 1f8031e30f5397bf97054b3fcffc11408c35759e..cdb10f885a4febea85fc5272e22f1378d770da8b 100644 (file)
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
 
        drm_mode_object_unregister(blob->dev, &blob->base);
 
-       kfree(blob);
+       kvfree(blob);
 }
 
 /**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
                return ERR_PTR(-EINVAL);
 
-       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
        if (!blob)
                return ERR_PTR(-ENOMEM);
 
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
                                    true, drm_property_free_blob);
        if (ret) {
-               kfree(blob);
+               kvfree(blob);
                return ERR_PTR(-EINVAL);
        }
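
Note: blob lengths come from userspace and can exceed what kmalloc() will satisfy; kvzalloc() transparently falls back to vmalloc for large requests, which is why every free path in this object's lifetime must switch to kvfree(). The paired idiom:

        blob = kvzalloc(sizeof(*blob) + length, GFP_KERNEL); /* may use vmalloc */
        if (!blob)
                return ERR_PTR(-ENOMEM);
        /* ... */
        kvfree(blob);   /* frees kmalloc- and vmalloc-backed memory alike */
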
 
index e5013a9991477eda57913a80f7c978a199727a62..540b59fb41038fcbed3f16528edf9f85e3976071 100644 (file)
@@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = {
        },
 };
 
+static struct platform_device *etnaviv_drm;
+
 static int __init etnaviv_init(void)
 {
+       struct platform_device *pdev;
        int ret;
        struct device_node *np;
 
@@ -644,7 +647,7 @@ static int __init etnaviv_init(void)
 
        ret = platform_driver_register(&etnaviv_platform_driver);
        if (ret != 0)
-               platform_driver_unregister(&etnaviv_gpu_driver);
+               goto unregister_gpu_driver;
 
        /*
         * If the DT contains at least one available GPU device, instantiate
@@ -653,20 +656,33 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-
-               platform_device_register_simple("etnaviv", -1, NULL, 0);
+               pdev = platform_device_register_simple("etnaviv", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev)) {
+                       ret = PTR_ERR(pdev);
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               etnaviv_drm = pdev;
                of_node_put(np);
                break;
        }
 
+       return 0;
+
+unregister_platform_driver:
+       platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+       platform_driver_unregister(&etnaviv_gpu_driver);
        return ret;
 }
 module_init(etnaviv_init);
 
 static void __exit etnaviv_exit(void)
 {
-       platform_driver_unregister(&etnaviv_gpu_driver);
+       platform_device_unregister(etnaviv_drm);
        platform_driver_unregister(&etnaviv_platform_driver);
+       platform_driver_unregister(&etnaviv_gpu_driver);
 }
 module_exit(etnaviv_exit);
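
Note: the init path now unwinds registrations in reverse order through goto labels, and the exit path mirrors the same order, dropping the device before either platform driver. The generic shape of the idiom, with hypothetical register_a()/register_b():

        ret = register_a();
        if (ret)
                return ret;
        ret = register_b();
        if (ret)
                goto err_unregister_a;
        return 0;

err_unregister_a:
        unregister_a();
        return ret;
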
 
index dd430f0f8ff5158975e21f26ad97fc3aa5bae2cb..90f17ff7888e7042b85f38e5ef114f77fa42dccd 100644 (file)
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
        struct work_struct sync_point_work;
        int sync_point_event;
 
+       /* hang detection */
+       u32 hangcheck_dma_addr;
+
        void __iomem *mmio;
        int irq;
 
index a74eb57af15bc65ba2ff4a2ed3906da29afe959b..50d6b88cb7aab3f99025829e1f544aae957d6d7f 100644 (file)
@@ -10,6 +10,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_sched.h"
+#include "state.xml.h"
 
 static int etnaviv_job_hang_limit = 0;
 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
+       u32 dma_addr;
+       int change;
+
+       /*
+        * If the GPU managed to complete this job's fence, the timeout is
+        * spurious. Bail out.
+        */
+       if (fence_completed(gpu, submit->out_fence->seqno))
+               return;
+
+       /*
+        * If the GPU is still making forward progress on the front-end (which
+        * should never loop) we shift out the timeout to give it a chance to
+        * finish the job.
+        */
+       dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+       change = dma_addr - gpu->hangcheck_dma_addr;
+       if (change < 0 || change > 16) {
+               gpu->hangcheck_dma_addr = dma_addr;
+               schedule_delayed_work(&sched_job->work_tdr,
+                                     sched_job->sched->timeout);
+               return;
+       }
 
        /* block scheduler */
        kthread_park(gpu->sched.thread);
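
Note: the timeout handler is now a three-way decision: a timeout is ignored when the job's fence has already signalled (spurious), deferred when the front-end DMA address has moved since the last check (slow but alive; a change of 0..16 bytes is treated as looping in place), and only otherwise escalated to recovery. In outline, with hypothetical helpers:

        if (job_already_finished(job))          /* fence signalled */
                return;                         /* spurious timeout */
        if (engine_made_progress(gpu)) {        /* FE DMA address moved */
                rearm_timer(job);               /* give it more time */
                return;
        }
        recover_from_hang(gpu);                 /* genuine hang */
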
index 82c95c34447fe19d34018b15a9b6a92e43ca917a..e868773ea5097cf3afd0592c2bf51ef66adf84cc 100644 (file)
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
        unsigned long val;
 
        val = readl(ctx->addr + DECON_WINCONx(win));
-       val &= ~WINCONx_BPPMODE_MASK;
+       val &= WINCONx_ENWIN_F;
 
        switch (fb->format->format) {
        case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
                writel(val, ctx->addr + DECON_VIDOSDxB(win));
        }
 
-       val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
-               VIDOSD_Wx_ALPHA_B_F(0x0);
+       val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+               VIDOSD_Wx_ALPHA_B_F(0xff);
        writel(val, ctx->addr + DECON_VIDOSDxC(win));
 
        val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
index a81b4a5e24a77397e4748a914357424cca60642d..ed3cc2989f93f2fe17f086e7ee8413702bd9d944 100644 (file)
@@ -420,7 +420,7 @@ err_mode_config_cleanup:
 err_free_private:
        kfree(private);
 err_free_drm:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev)
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops exynos_drm_ops = {
index 7fcc1a7ab1a079fe63bfa6d45687bae146ac2920..27b7d34d776cb23fdaa1cd35064236cd313f7edf 100644 (file)
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
 err:
        while (i--)
-               drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+               drm_gem_object_put_unlocked(&exynos_gem[i]->base);
 
        return ERR_PTR(ret);
 }
index 6127ef25acd60ec5ec6db92655d364220963fc0b..e8d0670bb5f8d280a9e17a1e4270c0464b1e4abc 100644 (file)
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
 static void fimc_set_window(struct fimc_context *ctx,
                            struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, h1, h2, v1, v2;
 
        /* cropped image */
        h1 = buf->rect.x;
-       h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+       h2 = real_width - buf->rect.w - buf->rect.x;
        v1 = buf->rect.y;
        v2 = buf->buf.height - buf->rect.h - buf->rect.y;
 
        DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
                buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
-               buf->buf.width, buf->buf.height);
+               real_width, buf->buf.height);
        DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
 
        /*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
 static void fimc_src_set_size(struct fimc_context *ctx,
                              struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
         * for now, we support only ITU601 8 bit mode
         */
        cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
-               EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
                EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
        fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
 
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
 static void fimc_dst_set_size(struct fimc_context *ctx,
                             struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, cfg_ext;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
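
Note: these hunks program the FIMC size registers from the line stride expressed in pixels rather than the visible width: the *SIZE registers describe the buffer layout in memory, so they must be derived from the plane pitch. The conversion used throughout:

        /* stride in pixels = bytes per line / bytes per pixel */
        unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
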
index 6e1494fa71b40d70a59b768a1ac2caf6d9799dfe..bdf5a7655228b69ad799ffdce859876cf176563c 100644 (file)
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
 
        /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
        exynos_gem = to_exynos_gem(obj);
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return exynos_gem->size;
 }
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                return;
        }
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 }
 
 static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        args->flags = exynos_gem->flags;
        args->size = exynos_gem->size;
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
index 35ac66730563944e83dcb1a6be7ec39999ead086..7ba414b52faa940595a028db7fa3e959bc7a58cd 100644 (file)
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_IN_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV61:
-               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
                break;
        case DRM_FORMAT_YUV422:
                cfg |= GSC_IN_YUV422_3P;
                break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_IN_YUV420_3P;
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
                break;
        }
 
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
 
        switch (degree) {
        case DRM_MODE_ROTATE_0:
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_90:
                cfg |= GSC_IN_ROT_90;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_180:
                cfg |= GSC_IN_ROT_180;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_270:
                cfg |= GSC_IN_ROT_270;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        }
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
        cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
                GSC_SRCIMG_WIDTH_MASK);
 
-       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
                GSC_SRCIMG_HEIGHT(buf->buf.height));
 
        gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_OUT_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV61:
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
                break;
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+               break;
        case DRM_FORMAT_YUV422:
+               cfg |= GSC_OUT_YUV422_3P;
+               break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_OUT_YUV420_3P;
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
-                       GSC_OUT_YUV420_2P);
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
                break;
        }
 
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
        /* original size */
        cfg = gsc_read(GSC_DSTIMG_SIZE);
        cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
-       cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+       cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
               GSC_DSTIMG_HEIGHT(buf->buf.height);
        gsc_write(cfg, GSC_DSTIMG_SIZE);
 
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
 };
 
 static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
-       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
        { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
        { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
        { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
index 26374e58c5578dc326356e19520b522e1d45412c..b435db8fc91677927c6e735d78d99232d7a91e0e 100644 (file)
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
        int ret = 0;
        int i;
 
-       /* basic checks */
-       if (buf->buf.width == 0 || buf->buf.height == 0)
-               return -EINVAL;
-       buf->format = drm_format_info(buf->buf.fourcc);
-       for (i = 0; i < buf->format->num_planes; i++) {
-               unsigned int width = (i == 0) ? buf->buf.width :
-                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
-               if (buf->buf.pitch[i] == 0)
-                       buf->buf.pitch[i] = width * buf->format->cpp[i];
-               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
-                       return -EINVAL;
-               if (!buf->buf.gem_id[i])
-                       return -ENOENT;
-       }
-
-       /* pitch for additional planes must match */
-       if (buf->format->num_planes > 2 &&
-           buf->buf.pitch[1] != buf->buf.pitch[2])
-               return -EINVAL;
-
        /* get GEM buffers and check their size */
        for (i = 0; i < buf->format->num_planes; i++) {
                unsigned int height = (i == 0) ? buf->buf.height :
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
        IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
 };
 
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
        [IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
        [IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
                                DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
        enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
        struct drm_ipp_limit l;
        struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+       int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
 
        if (!limits)
                return 0;
 
        __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
-       if (!__size_limit_check(buf->buf.width, &l.h) ||
+       if (!__size_limit_check(real_width, &l.h) ||
            !__size_limit_check(buf->buf.height, &l.v))
                return -EINVAL;
 
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
        return 0;
 }
 
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+                                      struct exynos_drm_ipp_buffer *buf,
+                                      struct exynos_drm_ipp_buffer *src,
+                                      struct exynos_drm_ipp_buffer *dst,
+                                      bool rotate, bool swap)
+{
+       const struct exynos_drm_ipp_formats *fmt;
+       int ret, i;
+
+       fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+                              buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+                                           DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+       if (!fmt) {
+               DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+                                buf == src ? "src" : "dst");
+               return -EINVAL;
+       }
+
+       /* basic checks */
+       if (buf->buf.width == 0 || buf->buf.height == 0)
+               return -EINVAL;
+
+       buf->format = drm_format_info(buf->buf.fourcc);
+       for (i = 0; i < buf->format->num_planes; i++) {
+               unsigned int width = (i == 0) ? buf->buf.width :
+                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+               if (buf->buf.pitch[i] == 0)
+                       buf->buf.pitch[i] = width * buf->format->cpp[i];
+               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+                       return -EINVAL;
+               if (!buf->buf.gem_id[i])
+                       return -ENOENT;
+       }
+
+       /* pitch for additional planes must match */
+       if (buf->format->num_planes > 2 &&
+           buf->buf.pitch[1] != buf->buf.pitch[2])
+               return -EINVAL;
+
+       /* check driver limits */
+       ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+                                              fmt->num_limits,
+                                              rotate,
+                                              buf == dst ? swap : false);
+       if (ret)
+               return ret;
+       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+                                               fmt->limits,
+                                               fmt->num_limits, swap);
+       return ret;
+}
+
 static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
 {
        struct exynos_drm_ipp *ipp = task->ipp;
-       const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
        struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
        unsigned int rotation = task->transform.rotation;
        int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
                return -EINVAL;
        }
 
-       src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_SOURCE);
-       if (!src_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
-                                              src_fmt->num_limits,
-                                              rotate, false);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               src_fmt->limits,
-                                               src_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
        if (ret)
                return ret;
 
-       dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_DESTINATION);
-       if (!dst_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
-                                              dst_fmt->num_limits,
-                                              false, swap);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               dst_fmt->limits,
-                                               dst_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
        if (ret)
                return ret;
 
index 38a2a7f1204be7a9ef416556db031cf8eaa07770..7098c6d35266bd1116e3be83feabc836bad30cb8 100644 (file)
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
        if (plane->state) {
                exynos_state = to_exynos_plane_state(plane->state);
                if (exynos_state->base.fb)
-                       drm_framebuffer_unreference(exynos_state->base.fb);
+                       drm_framebuffer_put(exynos_state->base.fb);
                kfree(exynos_state);
                plane->state = NULL;
        }
index 1a76dd3d52e1dc5b63d81bd19b49e4120e23891d..a820a68429b9a8f56be132d251b2f4ea81f6c1b4 100644 (file)
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
        val &= ~ROT_CONTROL_FLIP_MASK;
 
        if (rotation & DRM_MODE_REFLECT_X)
-               val |= ROT_CONTROL_FLIP_HORIZONTAL;
-       if (rotation & DRM_MODE_REFLECT_Y)
                val |= ROT_CONTROL_FLIP_VERTICAL;
+       if (rotation & DRM_MODE_REFLECT_Y)
+               val |= ROT_CONTROL_FLIP_HORIZONTAL;
 
        val &= ~ROT_CONTROL_ROT_MASK;
 
index 91d4382343d080abd4607fd78a58729878eda844..0ddb6eec7b113ea306fea4bde563e8ecb9945495 100644 (file)
@@ -30,6 +30,7 @@
 #define scaler_write(cfg, offset)      writel(cfg, scaler->regs + (offset))
 #define SCALER_MAX_CLK                 4
 #define SCALER_AUTOSUSPEND_DELAY       2000
+#define SCALER_RESET_WAIT_RETRIES      100
 
 struct scaler_data {
        const char      *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
 static u32 scaler_get_format(u32 drm_fmt)
 {
        switch (drm_fmt) {
-       case DRM_FORMAT_NV21:
-               return SCALER_YUV420_2P_UV;
        case DRM_FORMAT_NV12:
+               return SCALER_YUV420_2P_UV;
+       case DRM_FORMAT_NV21:
                return SCALER_YUV420_2P_VU;
        case DRM_FORMAT_YUV420:
                return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
                return SCALER_YUV422_1P_UYVY;
        case DRM_FORMAT_YVYU:
                return SCALER_YUV422_1P_YVYU;
-       case DRM_FORMAT_NV61:
-               return SCALER_YUV422_2P_UV;
        case DRM_FORMAT_NV16:
+               return SCALER_YUV422_2P_UV;
+       case DRM_FORMAT_NV61:
                return SCALER_YUV422_2P_VU;
        case DRM_FORMAT_YUV422:
                return SCALER_YUV422_3P;
-       case DRM_FORMAT_NV42:
-               return SCALER_YUV444_2P_UV;
        case DRM_FORMAT_NV24:
+               return SCALER_YUV444_2P_UV;
+       case DRM_FORMAT_NV42:
                return SCALER_YUV444_2P_VU;
        case DRM_FORMAT_YUV444:
                return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
        return 0;
 }
 
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+       int retry = SCALER_RESET_WAIT_RETRIES;
+
+       scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+       do {
+               cpu_relax();
+       } while (--retry > 1 &&
+                scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+       do {
+               cpu_relax();
+               scaler_write(1, SCALER_INT_EN);
+       } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+       return retry ? 0 : -EIO;
+}
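
Note: the reset handshake above is a bounded busy-wait keyed on a retry budget. The same waits can also be expressed with the iopoll helpers, which bound the wait in time instead; a sketch of the first wait under that assumption:

        #include <linux/iopoll.h>

        u32 val;
        int ret;

        ret = readl_poll_timeout(scaler->regs + SCALER_CFG, val,
                                 !(val & SCALER_CFG_SOFT_RESET),
                                 10, 1000);     /* poll every 10us, 1ms cap */
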
+
 static inline void scaler_enable_int(struct scaler_context *scaler)
 {
        u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
        u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
        struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
 
-       scaler->task = task;
-
        pm_runtime_get_sync(scaler->dev);
+       if (scaler_reset(scaler)) {
+               pm_runtime_put(scaler->dev);
+               return -EIO;
+       }
+
+       scaler->task = task;
 
        scaler_set_src_fmt(scaler, src_fmt);
        scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
 
 static inline u32 scaler_get_int_status(struct scaler_context *scaler)
 {
-       return scaler_read(SCALER_INT_STATUS);
+       u32 val = scaler_read(SCALER_INT_STATUS);
+
+       scaler_write(val, SCALER_INT_STATUS);
+
+       return val;
 }
 
 static inline int scaler_task_done(u32 val)
index 4704a993cbb7f003a51b901ae9b291a9adc9fe09..16b39734115c93855d82a5de94e16cc5e2570940 100644 (file)
 #define GSC_OUT_YUV420_3P              (3 << 4)
 #define GSC_OUT_YUV422_1P              (4 << 4)
 #define GSC_OUT_YUV422_2P              (5 << 4)
+#define GSC_OUT_YUV422_3P              (6 << 4)
 #define GSC_OUT_YUV444                 (7 << 4)
 #define GSC_OUT_TILE_TYPE_MASK         (1 << 2)
 #define GSC_OUT_TILE_C_16x8            (0 << 2)
index b51c05d03f14a1790ba43e70065af6acd74887ef..7f562410f9cf8aab7c47462fcbe7fd07fea68c73 100644 (file)
@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
        struct intel_vgpu *vgpu = s->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
+       u32 ctx_sr_ctl;
 
        if (offset + 4 > gvt->device_info.mmio_size) {
                gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }
 
+       /* TODO
+        * Right now we only scan LRI commands on KBL, and only in an
+        * inhibit context. That is good enough to support initializing
+        * MMIO by LRI command in a vGPU inhibit context on KBL.
+        */
+       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+                       intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+                       !strncmp(cmd, "lri", 3)) {
+               intel_gvt_hypervisor_read_gpa(s->vgpu,
+                       s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+               /* check inhibit context */
+               if (ctx_sr_ctl & 1) {
+                       u32 data = cmd_val(s, index + 1);
+
+                       if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+                               intel_vgpu_mask_mmio_write(vgpu,
+                                                       offset, &data, 4);
+                       else
+                               vgpu_vreg(vgpu, offset) = data;
+               }
+       }
+
        /* TODO: Update the global mask if this MMIO is a masked-MMIO */
        intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
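
Note: the new block reads the context save/restore control dword (ring_context_gpa + 12) and, when bit 0 marks the context as restore-inhibited, applies the register value carried by the LRI command directly to the vGPU's virtual register file, routing through the masked-write helper when the register has a mode mask. For reference, the payload layout the index arithmetic assumes (a hedged sketch, one register/value pair):

        /* MI_LOAD_REGISTER_IMM payload:
         *   dword 0: header (opcode, dword count)
         *   dword 1: register offset -> cmd_val(s, index)
         *   dword 2: value to write  -> cmd_val(s, index + 1)
         */
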
index 6d8180e8d1e21a71916e8b6cad404dab8d8c3257..4b072ade8c389372bcf1e161f6257305dec043bd 100644 (file)
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_B << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_C << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_D << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
index 23296547da95e8634c3bbaa401225c2d415498d5..4efec8fa6c1d30aa9c7853131ad299fb8bbef169 100644 (file)
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
+       mm->ggtt_mm.last_partial_off = -1UL;
 
        return mm;
 }
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
+               mm->ggtt_mm.last_partial_off = -1UL;
        }
 
        vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
+       /* If the ggtt entry size is 8 bytes and it is split into two
+        * 4-byte writes, we assume the two writes are consecutive.
+        * Otherwise, we abort and report an error.
+        */
+       if (bytes < info->gtt_entry_size) {
+               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+                       /* the first partial part */
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+                       return 0;
+               } else if ((g_gtt_index ==
+                               (ggtt_mm->ggtt_mm.last_partial_off >>
+                               info->gtt_entry_size_shift)) &&
+                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+                       /* the second partial part */
+
+                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+                               (info->gtt_entry_size - 1);
+
+                       memcpy((void *)&e.val64 + last_off,
+                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+                               last_off, bytes);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+               } else {
+                       int last_offset;
+
+                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+                                       ggtt_mm->ggtt_mm.last_partial_off, off,
+                                       bytes, info->gtt_entry_size);
+
+                       /* set the host ggtt entry to the scratch page and
+                        * clear the virtual ggtt entry as not present for
+                        * the last partially written offset
+                        */
+                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+                                       (~(info->gtt_entry_size - 1));
+
+                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate_pte(vgpu, &m);
+                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+                       ops->clear_present(&m);
+                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate(gvt->dev_priv);
+
+                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+                       ops->clear_present(&e);
+                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+                       return 0;
+               }
+       }
+
        if (ops->test_present(&e)) {
                gfn = ops->get_pfn(&e);
                m = e;
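
For reference, a minimal standalone sketch of the split-write tracking added above: a 64-bit GGTT entry written as two aligned 32-bit halves is staged until its partner arrives, with -1UL (PARTIAL_NONE below) serving as the "no partial write pending" sentinel. Names and the simplified flow are illustrative, not the driver's; the real code additionally checks that the second half targets the same entry at a different offset, and falls back to the error path above when it does not.

#include <stdint.h>
#include <string.h>

#define ENTRY_SIZE   8u
#define PARTIAL_NONE (~0ul)		/* mirrors last_partial_off = -1UL */

struct ggtt_tracker {
	unsigned long pending_off;	/* byte offset of the staged half */
	uint64_t pending_data;		/* entry value assembled so far */
};

/* Stage one aligned 4-byte write; returns 1 once the 8-byte entry is whole. */
static int ggtt_stage_write(struct ggtt_tracker *t, unsigned long off,
			    uint32_t data, uint64_t *entry)
{
	if (t->pending_off == PARTIAL_NONE) {
		/* first half: remember it and wait for the partner write */
		t->pending_data = 0;
		memcpy((char *)&t->pending_data + (off & (ENTRY_SIZE - 1)),
		       &data, sizeof(data));
		t->pending_off = off;
		return 0;
	}

	/* second half of the same entry: combine and reset the tracker */
	memcpy((char *)&t->pending_data + (off & (ENTRY_SIZE - 1)),
	       &data, sizeof(data));
	*entry = t->pending_data;
	t->pending_off = PARTIAL_NONE;
	return 1;
}
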
index 3792f2b7f4ff0686832458efcf533248c4aa356d..97e62647418a0a48fa1d1da6ace5a800f7d0de23 100644 (file)
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
+                       unsigned long last_partial_off;
+                       u64 last_partial_data;
                } ggtt_mm;
        };
 };
index 05d15a095310d41b75d6ab64aa04162799f393a1..858967daf04b21792be2f578c9b27a6e75ba8277 100644 (file)
@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED (1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX       (1 << 7)
 
        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
        return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has the in-context flag
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has the in-context flag, false if it doesn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
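
A note on the indexing used by both helpers above: the F_* attributes are packed one byte per 32-bit register, so the flag byte for a given offset lives at mmio_attribute[offset >> 2]. A hypothetical standalone version of the same bookkeeping (array size and names are illustrative assumptions, not the driver's):

#include <stdint.h>

#define F_IN_CTX (1 << 7)

/* one attribute byte per 32-bit register; a 2 MiB register file is assumed */
static uint8_t mmio_attribute[0x200000 >> 2];

static void set_in_ctx(uint32_t offset)
{
	mmio_attribute[offset >> 2] |= F_IN_CTX;
}

static int is_in_ctx(uint32_t offset)
{
	return mmio_attribute[offset >> 2] & F_IN_CTX;
}
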
index bcbc47a88a7006a06107005b0faad5c02820c215..8f1caacdc78a4037efb56aeb1fa1cc488a5103f3 100644 (file)
@@ -3045,6 +3045,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       u32 mask, old_vreg;
+
+       old_vreg = vgpu_vreg(vgpu, offset);
+       write_vreg(vgpu, offset, p_data, bytes);
+       mask = vgpu_vreg(vgpu, offset) >> 16;
+       vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+                               (vgpu_vreg(vgpu, offset) & mask);
+
+       return 0;
+}
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
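
The masked-write convention implemented by intel_vgpu_mask_mmio_write() above deserves a worked example: the top 16 bits of the written value select which of the bottom 16 bits change, so a guest can set or clear individual bits without a read-modify-write cycle. The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes later in this diff rely on the same convention. A minimal sketch with illustrative names:

#include <assert.h>
#include <stdint.h>

static uint32_t mask_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	/* only the bits selected by the mask take the new value */
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	/* set bit 1 only: mask selects bit 1, the low word supplies a 1 */
	assert(mask_write(0x0000f00d, 0x00020002) == 0x0000f00f);
	/* clear bit 0 only: mask selects bit 0, the low word supplies a 0 */
	assert(mask_write(0x0000f00f, 0x00010000) == 0x0000f00e);
	return 0;
}
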
index 71b6208759439d8ca74d3dbee4ea4a95059e779d..dac8c6401e26a010f5ed36bd441b9f429526c24e 100644 (file)
@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
                           void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+                                 void *p_data, unsigned int bytes);
 #endif
index 0f949554d118c22e1313cfecc2948d0fcc4672ef..5ca9caf7552a6145b0ccb91a3f18a0c3d4764841 100644 (file)
@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 
        for (mmio = gvt->engine_mmio_list.mmio;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
-               if (mmio->in_context)
+               if (mmio->in_context) {
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+                       intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+               }
        }
 }
index 34c125e2d90c094c98759e127d2f93780cd65788..71e1aa54f7741bcecf62067050e6f9e5799677c2 100644 (file)
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
 
        unsigned int bsd_engine;
 
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments the per-client ban score, as do
+ * hangs in short succession. Once the ban threshold is reached, the
+ * client is considered banned and submitting more work will fail.
+ * This is a stopgap measure to limit a badly behaving client's
+ * access to the GPU. Note that unbannable contexts never increment
+ * the client ban score.
  */
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
-       atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST    1
+#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN   3
+#define I915_CLIENT_SCORE_BANNED       9
+       /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+       atomic_t ban_score;
+       unsigned long hang_timestamp;
 };
 
 /* Interface history:
@@ -645,6 +652,7 @@ enum intel_sbi_destination {
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -2238,9 +2246,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
  **/
 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
 
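
To make the scoring scheme above concrete, here is a simplified single-threaded sketch of the accounting that i915_gem_client_mark_guilty() and client_is_banned() perform later in this diff, with jiffies replaced by seconds and the atomics dropped:

#include <stdbool.h>

#define SCORE_HANG_FAST   1	/* hang within 60 s of the previous one */
#define FAST_HANG_SECONDS 60
#define SCORE_CONTEXT_BAN 3	/* one of the client's contexts was banned */
#define SCORE_BANNED      9	/* refuse new contexts at this total */

struct client {
	unsigned int ban_score;
	unsigned long hang_timestamp;	/* seconds, monotonic */
};

static void client_mark_guilty(struct client *c, bool ctx_banned,
			       unsigned long now)
{
	unsigned int score = ctx_banned ? SCORE_CONTEXT_BAN : 0;
	unsigned long prev = c->hang_timestamp;

	c->hang_timestamp = now;
	if (now < prev + FAST_HANG_SECONDS)
		score += SCORE_HANG_FAST;

	c->ban_score += score;
}

static bool client_is_banned(const struct client *c)
{
	/* three context bans, or a long run of rapid hangs, trips this */
	return c->ban_score >= SCORE_BANNED;
}
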
index 3704f4c0c2c970c31b0c6031050b20126e7ae2a9..17c5097721e8f27795dae3487a21fb59d9ec75a9 100644 (file)
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
        struct i915_vma *vma;
        pgoff_t page_offset;
-       unsigned int flags;
        int ret;
 
        /* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
                goto err_unlock;
        }
 
-       /* If the object is smaller than a couple of partial vma, it is
-        * not worth only creating a single partial vma - we may as well
-        * clear enough space for the full object.
-        */
-       flags = PIN_MAPPABLE;
-       if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
-               flags |= PIN_NONBLOCK | PIN_NONFAULT;
 
        /* Now pin it into the GTT as needed */
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                      PIN_MAPPABLE |
+                                      PIN_NONBLOCK |
+                                      PIN_NONFAULT);
        if (IS_ERR(vma)) {
                /* Use a partial view if it is bigger than available space */
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+               unsigned int flags;
 
-               /* Userspace is now writing through an untracked VMA, abandon
+               flags = PIN_MAPPABLE;
+               if (view.type == I915_GGTT_VIEW_NORMAL)
+                       flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+               /*
+                * Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
                 */
                obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
-               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               if (IS_ERR(vma) && !view.type) {
+                       flags = PIN_MAPPABLE;
+                       view.type = I915_GGTT_VIEW_PARTIAL;
+                       vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               }
        }
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
@@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+                                       const struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       unsigned long prev_hang;
+
+       if (i915_gem_context_is_banned(ctx))
+               score = I915_CLIENT_SCORE_CONTEXT_BAN;
+       else
+               score = 0;
+
+       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+               score += I915_CLIENT_SCORE_HANG_FAST;
+
+       if (score) {
+               atomic_add(score, &file_priv->ban_score);
+
+               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+                                ctx->name, score,
+                                atomic_read(&file_priv->ban_score));
+       }
+}
+
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-       bool banned;
+       unsigned int score;
+       bool banned, bannable;
 
        atomic_inc(&ctx->guilty_count);
 
-       banned = false;
-       if (i915_gem_context_is_bannable(ctx)) {
-               unsigned int score;
+       bannable = i915_gem_context_is_bannable(ctx);
+       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-               score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-                                         &ctx->ban_score);
-               banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+       DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+                        ctx->name, atomic_read(&ctx->guilty_count),
+                        score, yesno(banned && bannable));
 
-               DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-                                ctx->name, score, yesno(banned));
-       }
-       if (!banned)
+       /* Cool contexts don't accumulate client ban score */
+       if (!bannable)
                return;
 
-       i915_gem_context_set_banned(ctx);
-       if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-               atomic_inc(&ctx->file_priv->context_bans);
-               DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-                                ctx->name, atomic_read(&ctx->file_priv->context_bans));
-       }
+       if (banned)
+               i915_gem_context_set_banned(ctx);
+
+       if (!IS_ERR_OR_NULL(ctx->file_priv))
+               i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
        file_priv->bsd_engine = -1;
+       file_priv->hang_timestamp = jiffies;
 
        ret = i915_gem_context_open(i915, file);
        if (ret)
index 33f8a4b3c98170f2857e15255e4fc23ae8bbb49e..060335d3d9e0b44d19c9b3147e2e1496d1e8e571 100644 (file)
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-       return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
index f627a8c47c58a36f6ff92f17a4d6d672b28bc00b..22df17c8ca9b0fc75dc29189c6b7117002c5ad17 100644 (file)
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+          unsigned int i, unsigned batch_idx,
+          struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
        int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
        eb->flags[i] = entry->flags;
        vma->exec_flags = &eb->flags[i];
 
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       if (i == batch_idx) {
+               if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+                       eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+               if (eb->reloc_cache.has_fence)
+                       eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+               eb->batch = vma;
+       }
+
        err = 0;
        if (eb_pin_vma(eb, entry, vma)) {
                if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
        struct drm_i915_gem_object *obj;
-       unsigned int i;
+       unsigned int i, batch;
        int err;
 
        if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
        INIT_LIST_HEAD(&eb->relocs);
        INIT_LIST_HEAD(&eb->unbound);
 
+       batch = eb_batch_index(eb);
+
        for (i = 0; i < eb->buffer_count; i++) {
                u32 handle = eb->exec[i].handle;
                struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                lut->handle = handle;
 
 add_vma:
-               err = eb_add_vma(eb, i, vma);
+               err = eb_add_vma(eb, i, batch, vma);
                if (unlikely(err))
                        goto err_vma;
 
                GEM_BUG_ON(vma != eb->vma[i]);
                GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+               GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+                          eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
        }
 
-       /* take note of the batch buffer before we might reorder the lists */
-       i = eb_batch_index(eb);
-       eb->batch = eb->vma[i];
-       GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-       /*
-        * SNA is doing fancy tricks with compressing batch buffers, which leads
-        * to negative relocation deltas. Usually that works out ok since the
-        * relocate address is still positive, except when the batch is placed
-        * very low in the GTT. Ensure this doesn't happen.
-        *
-        * Note that actual hangs have only been observed on gen7, but for
-        * paranoia do it everywhere.
-        */
-       if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-               eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-       if (eb->reloc_cache.has_fence)
-               eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
        eb->args->flags |= __EXEC_VALIDATED;
        return eb_reserve(eb);
 
index f9bc3aaa90d0f5de110e893415be0a6ee1c40448..c16cb025755e46edb6038e15790732c5fda74b71 100644 (file)
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 
                /*
                 * Clear the PIPE*STAT regs before the IIR
+                *
+                * Toggle the enable bits to make sure we get an
+                * edge in the ISR pipe event bit if we don't clear
+                * all the enabled status bits. Otherwise the edge
+                * triggered IIR on i965/g4x wouldn't notice that
+                * an interrupt is still pending.
                 */
-               if (pipe_stats[pipe])
-                       I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+               if (pipe_stats[pipe]) {
+                       I915_WRITE(reg, pipe_stats[pipe]);
+                       I915_WRITE(reg, enable_mask);
+               }
        }
        spin_unlock(&dev_priv->irq_lock);
 }
@@ -1990,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-       u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+       u32 hotplug_status = 0, hotplug_status_mask;
+       int i;
 
-       if (hotplug_status)
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+                       DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+       else
+               hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+       /*
+        * We absolutely have to clear all the pending interrupt
+        * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+        * interrupt bit won't have an edge, and the i965/g4x
+        * edge triggered IIR will not notice that an interrupt
+        * is still pending. We can't use PORT_HOTPLUG_EN to
+        * guarantee the edge as the act of toggling the enable
+        * bits can itself generate a new hotplug interrupt :(
+        */
+       for (i = 0; i < 10; i++) {
+               u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+               if (tmp == 0)
+                       return hotplug_status;
+
+               hotplug_status |= tmp;
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+       }
+
+       WARN_ONCE(1,
+                 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+                 I915_READ(PORT_HOTPLUG_STAT));
 
        return hotplug_status;
 }
index f11bb213ec0784e4c50db5bd0ea0647a5419e0e1..7720569f20244114e027dca6b810b182771cfcee 100644 (file)
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
 #define _3D_CHICKEN    _MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB     (1 << 10)
 #define _3D_CHICKEN2   _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN       _MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX      (1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
 #define _3D_CHICKEN3   _MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX           (1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE       (1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
index 9324d476e0a7c356b39cb02374e904a2b0a95262..0531c01c3604663c9166e593d2c26fd45cbd0361 100644 (file)
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
-                       GEM_BUG_ON(vma->size >= obj->base.size);
+                       GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
index de0e22322c76ed649c2f36266e65247ed9d02c28..072b326d5ee0a77868de83818a334c68077c762e 100644 (file)
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
        int max_dotclk = dev_priv->max_dotclk_freq;
        int max_clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock < 25000)
                return MODE_CLOCK_LOW;
 
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
                                   struct intel_crtc_state *pipe_config,
                                   struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = true;
 
        return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
 
        pipe_config->has_pch_encoder = true;
 
index f4a8598a2d392d607e8d17338f1ff251bc7c627b..fed26d6e4e276bfb55c68d21f92f1d51679a0677 100644 (file)
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
        I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
        uint32_t val = I915_READ(reg);
 
        val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
        val |= TRANS_DDI_PORT_NONE;
        I915_WRITE(reg, val);
+
+       if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+               /* Quirk time at 100ms for reliable operation */
+               msleep(100);
+       }
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
index dee3a8e659f1d6c9dbe2040abd6e2ba42020070a..dec0d60921bfd8394e2c38c7d32d2b66095b8336 100644 (file)
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
 
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+               intel_ddi_disable_transcoder_func(old_crtc_state);
 
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
 {
+       /*
+        * Can't reject DBLSCAN here because Xorg ddxen can add piles
+        * of DBLSCAN modes to the output's mode list when they detect
+        * the scaling mode property on the connector. And they don't
+        * ask the kernel to validate those modes in any way until
+        * modeset time at which point the client gets a protocol error.
+        * So in order to not upset those clients we silently ignore the
+        * DBLSCAN flag on such connectors. For other connectors we will
+        * reject modes with the DBLSCAN flag in encoder->compute_config().
+        * And we always reject DBLSCAN modes in connector->mode_valid()
+        * as we never want such modes on the connector's mode list.
+        */
+
        if (mode->vscan > 1)
                return MODE_NO_VSCAN;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
-
        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;
 
@@ -14636,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
        DRM_INFO("Applying T12 delay quirk\n");
 }
 
+/*
+ * GeminiLake NUC HDMI outputs require additional off time to
+ * allow the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -14722,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Toshiba Satellite P50-C-18C */
        { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
index 8320f0e8e3bef8587b94a908bf87f3eecdf63fc5..16faea30114ac01f2744b69150d7a26d7ca578c3 100644 (file)
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return false;
 
@@ -2782,16 +2788,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
 static void g4x_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
-{
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-       /* disable the port before the pipe on g4x */
-       intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-                          const struct intel_crtc_state *old_crtc_state,
-                          const struct drm_connector_state *old_conn_state)
 {
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
 
+       /*
+        * Bspec does not list a specific disable sequence for g4x DP.
+        * Follow the ilk+ sequence (disable the pipe before the port)
+        * for g4x DP, as it does not suffer from the underruns seen
+        * with the normal g4x modeset sequence (disable the pipe
+        * after the port).
+        */
        intel_dp_link_down(encoder, old_crtc_state);
 
        /* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+       if (!HAS_GMCH_DISPLAY(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
-       } else if (INTEL_GEN(dev_priv) >= 5) {
-               intel_encoder->pre_enable = g4x_pre_enable_dp;
-               intel_encoder->enable = g4x_enable_dp;
-               intel_encoder->disable = ilk_disable_dp;
-               intel_encoder->post_disable = ilk_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
+               intel_encoder->post_disable = g4x_post_disable_dp;
        }
 
        intel_dig_port->dp.output_reg = output_reg;
index 9e6956c0868835a9bcdf156c45d151ee2479b99a..5890500a3a8b6640e587070e89d805b06c99baf7 100644 (file)
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_LIMITED_M_N);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = false;
        bpp = 24;
        if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        if (!intel_dp)
                return MODE_ERROR;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
index 0361130500a6f7fc0786b343c2193c1629fbb39f..b8eefbffc77d871696edf79066162113b0a56751 100644 (file)
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
 struct intel_encoder *
index cf39ca90d887872ddb2de5e011041f785fda3996..f349b39201993c88f633c1154c9fc9054c90449c 100644 (file)
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_KMS("\n");
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
index a70d767313aa10e198338e2e7472592827dfa347..61d908e0df0e2d75175f6f6300d0114b66f3833d 100644 (file)
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        int target_clock = mode->clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        /* XXX: Validate clock range */
 
        if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        if (fixed_mode)
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
index ee929f31f7db712d0f1b306571de962480758314..d8cb53ef435134b67e2f08fc5c8470f6697436d0 100644 (file)
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        bool force_dvi =
                READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        clock = mode->clock;
 
        if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
        if (pipe_config->has_hdmi_sink)
index 15434cad543001317be875f1a266e3fef6636042..7c4c8fb1dae465bdaba562ed536e68918b7eb6b8 100644 (file)
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+       *batch++ = MI_LOAD_REGISTER_IMM(3);
+
        /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-       *batch++ = MI_LOAD_REGISTER_IMM(1);
        *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
        *batch++ = _MASKED_BIT_DISABLE(
                        GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+       /* BSpec: 11391 */
+       *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+       *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+       /* BSpec: 11299 */
+       *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+       *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
        *batch++ = MI_NOOP;
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
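
The hunk above folds three workaround writes under a single load-register-immediate header, whose count must match the number of (register, value) pairs that follow. An abstract sketch of that emission pattern; emit_lri() and its header parameter are hypothetical helpers, not the i915 macros:

#include <stddef.h>
#include <stdint.h>

struct reg_write {
	uint32_t reg;
	uint32_t val;
};

/* Emit one header advertising n pairs, then the pairs themselves. */
static uint32_t *emit_lri(uint32_t *batch, uint32_t header,
			  const struct reg_write *w, size_t n)
{
	*batch++ = header;	/* MI_LOAD_REGISTER_IMM(n) in the driver */
	while (n--) {
		*batch++ = w->reg;
		*batch++ = w->val;
		w++;
	}
	return batch;
}
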
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
        ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-       if (IS_ERR(ctx_obj)) {
-               ret = PTR_ERR(ctx_obj);
-               goto error_deref_obj;
-       }
+       if (IS_ERR(ctx_obj))
+               return PTR_ERR(ctx_obj);
 
        vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
index d278f24ba6ae58bf4a704dc57610a53e9b042d25..48f618dc9abbb9de03ddf594cddf67d154a872e1 100644 (file)
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
        if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                               adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        if (HAS_PCH_SPLIT(dev_priv)) {
                pipe_config->has_pch_encoder = true;
 
index 25005023c243cb0526baf01f9c90d90dd3df4da2..26975df4e593b9a899c060cee398ffbdb7b5052b 100644 (file)
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                                                           adjusted_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /*
         * Make the CRTC code factor in the SDVO pixel multiplier.  The
         * SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (intel_sdvo->pixel_clock_min > mode->clock)
                return MODE_CLOCK_LOW;
 
index 885fc3809f7f904e8bc82e0eeee9cddd3dcef3f4..b55b5c157e384158d6c55dab6a693c2e9e5fea21 100644 (file)
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
                        struct drm_connector_state *conn_state)
 {
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
 
        if (!tv_mode)
                return false;
 
-       pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       adjusted_mode->crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
        /* TV has its own notion of sync and other mode flags, so clear them. */
-       pipe_config->base.adjusted_mode.flags = 0;
+       adjusted_mode->flags = 0;
 
        /*
         * FIXME: We don't check whether the input mode is actually what we want
index 56dd7a9a8e254a3e03e4bebb5fda75b8555abbba..dd5312b02a8d21749fb004d41db74ff4bc511b1a 100644 (file)
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(imx_ldb->regmap);
        }
 
+       /* disable LDB by resetting the control register to POR default */
+       regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
        imx_ldb->dev = dev;
 
        if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                if (ret || i < 0 || i > 1)
                        return -EINVAL;
 
+               if (!of_device_is_available(child))
+                       continue;
+
                if (dual && i > 0) {
                        dev_warn(dev, "dual-channel mode, ignoring second output\n");
                        continue;
                }
 
-               if (!of_device_is_available(child))
-                       continue;
-
                channel = &imx_ldb->channel[i];
                channel->ldb = imx_ldb;
                channel->chno = i;
index 32b1a6cdecfc05133147e6ff85c959f4668362f1..d3443125e66164a863fb41bfdb435a1ce13340b6 100644 (file)
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        priv->io_base = regs;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
index 501d2d290e9c6c49072c8a57d9a3643dd286b731..70dce544984e848b54409a390a41c2a3f9c24d4f 100644 (file)
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
        nouveau_display(dev)->init = nv04_display_init;
        nouveau_display(dev)->fini = nv04_display_fini;
 
+       /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+       dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
        nouveau_hw_save_vga_fonts(dev, 1);
 
        nv04_crtc_create(dev, 0);
index 291c08117ab65337f7a9d8567cec08207cd555db..397143b639c64ba6dbd7c1144c0314aae954339f 100644 (file)
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
        nvif_object_map(&wndw->wimm.base.user, NULL, 0);
        wndw->immd = func;
-       wndw->ctxdma.parent = &disp->core->chan.base.user;
+       wndw->ctxdma.parent = NULL;
        return 0;
 }
 
index b83465ae7c1bcead7b81657cd2d03968e74a6baf..9bae4db84cfb8b27494388baaf3611d8815718d4 100644 (file)
@@ -1585,8 +1585,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
  *****************************************************************************/
 
 static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
 {
+       struct nouveau_drm *drm = nouveau_drm(state->dev);
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_core *core = disp->core;
        struct nv50_mstm *mstm;
@@ -1617,6 +1618,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
        }
 }
 
+static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane *plane;
+       int i;
+
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               struct nv50_wndw *wndw = nv50_wndw(plane);
+               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+                       if (wndw->func->update)
+                               wndw->func->update(wndw, interlock);
+               }
+       }
+}
+
 static void
 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 {
@@ -1684,7 +1701,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        help->disable(encoder);
                        interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
                        if (outp->flush_disable) {
-                               nv50_disp_atomic_commit_core(drm, interlock);
+                               nv50_disp_atomic_commit_wndw(state, interlock);
+                               nv50_disp_atomic_commit_core(state, interlock);
                                memset(interlock, 0x00, sizeof(interlock));
                        }
                }
@@ -1693,15 +1711,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
        /* Flush disable. */
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (atom->flush_disable) {
-                       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-                               struct nv50_wndw *wndw = nv50_wndw(plane);
-                               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                                       if (wndw->func->update)
-                                               wndw->func->update(wndw, interlock);
-                               }
-                       }
-
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_wndw(state, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                        memset(interlock, 0x00, sizeof(interlock));
                }
        }
@@ -1762,18 +1773,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
        }
 
        /* Flush update. */
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               struct nv50_wndw *wndw = nv50_wndw(plane);
-               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                       if (wndw->func->update)
-                               wndw->func->update(wndw, interlock);
-               }
-       }
+       nv50_disp_atomic_commit_wndw(state, interlock);
 
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+                   interlock[NV50_DISP_INTERLOCK_OVLY] ||
+                   interlock[NV50_DISP_INTERLOCK_WNDW] ||
                    !atom->state.legacy_cursor_update)
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                else
                        disp->core->func->update(disp->core, interlock, false);
        }
@@ -1871,7 +1878,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
                nv50_disp_atomic_commit_tail(state);
 
        drm_for_each_crtc(crtc, dev) {
-               if (crtc->state->enable) {
+               if (crtc->state->active) {
                        if (!drm->have_disp_power_ref) {
                                drm->have_disp_power_ref = true;
                                return 0;
@@ -2119,10 +2126,6 @@ nv50_display_destroy(struct drm_device *dev)
        kfree(disp);
 }
 
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
 int
 nv50_display_create(struct drm_device *dev)
 {
@@ -2147,8 +2150,6 @@ nv50_display_create(struct drm_device *dev)
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
        dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
-       if (nouveau_atomic)
-               dev->driver->driver_features |= DRIVER_ATOMIC;
 
        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
index 224963b533a69163b39bed8cbf175e637befd591..c5a9bc1af5af79038d938de1f8cb3a85dc35dac8 100644 (file)
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
        if (ret)
                return ret;
 
-       ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-       if (IS_ERR(ctxdma)) {
-               nouveau_bo_unpin(fb->nvbo);
-               return PTR_ERR(ctxdma);
+       if (wndw->ctxdma.parent) {
+               ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+               if (IS_ERR(ctxdma)) {
+                       nouveau_bo_unpin(fb->nvbo);
+                       return PTR_ERR(ctxdma);
+               }
+
+               asyw->image.handle[0] = ctxdma->object.handle;
        }
 
        asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-       asyw->image.handle[0] = ctxdma->object.handle;
        asyw->image.offset[0] = fb->nvbo->bo.offset;
 
        if (wndw->func->prepare) {
index debbbf0fd4bdda619732c67952c772f9957c4166..408b955e5c39a6b41043c18fb37ae8dc9de42c04 100644 (file)
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->client.device;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
 
        INIT_LIST_HEAD(&drm->bl_connectors);
 
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
                return 0;
        }
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
                    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
                        break;
                }
        }
-
+       drm_connector_list_iter_end(&conn_iter);
 
        return 0;
 }
index 7b557c3543079128ff339b79320c62d6bfb36334..af68eae4c626154938f523fb37d2dad133407e21 100644 (file)
@@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_connector *nv_connector = NULL;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int type, ret = 0;
        bool dummy;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                nv_connector = nouveau_connector(connector);
-               if (nv_connector->index == index)
+               if (nv_connector->index == index) {
+                       drm_connector_list_iter_end(&conn_iter);
                        return connector;
+               }
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
        if (!nv_connector)
index a4d1a059bd3d4f948c36c0a942150c68199ec974..dc7454e7f19aa0ec9f22e279015a0966eedbd531 100644 (file)
@@ -33,6 +33,7 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
 #include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
 
 struct nvkm_i2c_port;
 
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
        return container_of(con, struct nouveau_connector, base);
 }
 
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+       const struct nouveau_encoder *nv_encoder;
+       const struct drm_encoder *encoder;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+               return false;
+
+       nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+       if (!nv_encoder)
+               return false;
+
+       encoder = &nv_encoder->base.base;
+       return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+       drm_for_each_connector_iter(connector, iter) \
+               for_each_if(!nouveau_connector_is_mst(connector))
+
 static inline struct nouveau_connector *
 nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 {
        struct drm_device *dev = nv_crtc->base.dev;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       struct nouveau_connector *nv_connector = NULL;
        struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder && connector->encoder->crtc == crtc)
-                       return nouveau_connector(connector);
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+               if (connector->encoder && connector->encoder->crtc == crtc) {
+                       nv_connector = nouveau_connector(connector);
+                       break;
+               }
        }
+       drm_connector_list_iter_end(&conn_iter);
 
-       return NULL;
+       return nv_connector;
 }
 
 struct drm_connector *
index 774b429142bc8e22c79e6b0ed46f97f72e3ffab1..ec7861457b84a4ee4a2be3b721c0cbcbe3f66a31 100644 (file)
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int ret;
 
        ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_get(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        /* enable flip completion events */
        nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
 
        if (!suspend) {
                if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        nvif_notify_put(&drm->flip);
 
        /* disable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_put(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
index 775443c9af943eb2e96ffd3187852b681fc3f916..f5d3158f0378a4537742fee31df766ce23027e76 100644 (file)
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
 int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
 static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
+       if (nouveau_atomic)
+               driver_pci.driver_features |= DRIVER_ATOMIC;
+
        ret = drm_get_pci_dev(pdev, pent, &driver_pci);
        if (ret) {
                nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
 static int
 nouveau_pmops_runtime_idle(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(drm_dev);
-       struct drm_crtc *crtc;
-
        if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
 
-       list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
-               if (crtc->enabled) {
-                       DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-                       return -EBUSY;
-               }
-       }
        pm_runtime_mark_last_busy(dev);
        pm_runtime_autosuspend(dev);
        /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
index 300daee74209ab82a1675e94a052089f9523c6b4..e6ccafcb9c414d5f62a98005ed95a5d1f4113b77 100644 (file)
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                struct nouveau_bo *nvbo;
                uint32_t data;
 
-               if (unlikely(r->bo_index > req->nr_buffers)) {
+               if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                if (b->presumed.valid)
                        continue;
 
-               if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+               if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
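
A quick aside on why these two checks needed ">=" rather than ">": with nr_buffers entries, valid indices run from 0 to nr_buffers - 1, so an index equal to nr_buffers is already one past the end. A standalone sketch of the rule, with illustrative names rather than the driver's:

#include <stdio.h>

/* Valid indices for an array of nr entries are 0..nr-1. */
static int bo_index_valid(unsigned int index, unsigned int nr_buffers)
{
	return index < nr_buffers;	/* reject when index >= nr_buffers */
}

int main(void)
{
	/* With 4 buffers, index 4 must be rejected; the old '>' let it pass. */
	printf("index 3 -> %s\n", bo_index_valid(3, 4) ? "ok" : "invalid");
	printf("index 4 -> %s\n", bo_index_valid(4, 4) ? "ok" : "invalid");
	return 0;
}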
index 73b5d46104bd3bfc97d8139ea640dbaa4aa3c8a6..434d2fc5bb1ce90c92c16299208ba5a290cbbe06 100644 (file)
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
        if (fb->func->init)
                fb->func->init(fb);
 
+       if (fb->func->init_remapper)
+               fb->func->init_remapper(fb);
+
        if (fb->func->init_page) {
                ret = fb->func->init_page(fb);
                if (WARN_ON(ret))
index dffe1f5e10712e6739fcfad81f589b6bd87aa30c..8205ce436b3e847663d5d44ab35c9e13885008a4 100644 (file)
@@ -36,6 +36,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
        nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
 }
 
+void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       /* Disable address remapper. */
+       nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
 void
 gp100_fb_init(struct nvkm_fb *base)
 {
@@ -56,6 +64,7 @@ gp100_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gp100_fb_init,
+       .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .ram_new = gp100_ram_new,
index b84b9861ef269e264e7838f308ff4a8d43545dc2..b4d74e81567447078fef479f49a722a8cd0f7cfc 100644 (file)
@@ -31,6 +31,7 @@ gp102_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gp100_fb_init,
+       .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
        .ram_new = gp100_ram_new,
 };
index 2857f31466bff2a30d3145d6b212a067428a0500..1e4ad61c19e1a2daca5600f80e0d2949db473e83 100644 (file)
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
        u32 (*tags)(struct nvkm_fb *);
        int (*oneinit)(struct nvkm_fb *);
        void (*init)(struct nvkm_fb *);
+       void (*init_remapper)(struct nvkm_fb *);
        int (*init_page)(struct nvkm_fb *);
        void (*init_unkn)(struct nvkm_fb *);
        void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
 
 int gm200_fb_init_page(struct nvkm_fb *);
 
+void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 #endif
index b8cda94492412c820c2d44ec0f40afa298f163b0..768207fbbae3d8d23e287428ccca34bcdfedbda2 100644 (file)
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        struct qxl_cursor_cmd *cmd;
        struct qxl_cursor *cursor;
        struct drm_gem_object *obj;
-       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
        int ret;
        void *user_ptr;
        int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                                           cursor_bo, 0);
                cmd->type = QXL_CURSOR_SET;
 
-               qxl_bo_unref(&qcrtc->cursor_bo);
+               old_cursor_bo = qcrtc->cursor_bo;
                qcrtc->cursor_bo = cursor_bo;
                cursor_bo = NULL;
        } else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
 
+       if (old_cursor_bo)
+               qxl_bo_unref(&old_cursor_bo);
+
        qxl_bo_unref(&cursor_bo);
 
        return;
index 2589f4acd5ae22ab6255e3a7baa35db2ac38c073..9c81301d0eedabd120c2cb26e1e530ce15b71205 100644 (file)
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I)              += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_tv.o
 obj-$(CONFIG_DRM_SUN4I)                += sun6i_drc.o
 
-obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I)                += sun4i-frontend.o
+endif
 obj-$(CONFIG_DRM_SUN4I_HDMI)   += sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)    += sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)        += sun8i-drm-hdmi.o
index 08747fc3ee713d6ba796b946103334b302a48758..8232b39e16ca700d17ebfae9ab207a0a3c4d5c78 100644 (file)
@@ -17,7 +17,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
                                     const struct drm_display_mode *mode)
 {
-       struct drm_panel *panel = tcon->panel;
-       struct drm_connector *connector = panel->connector;
-       struct drm_display_info display_info = connector->display_info;
        unsigned int bp, hsync, vsync;
        u8 clk_delay;
        u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
-       /*
-        * On A20 and similar SoCs, the only way to achieve Positive Edge
-        * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-        * By default TCON works in Negative Edge(Falling Edge),
-        * this is why phase is set to 0 in that case.
-        * Unfortunately there's no way to logically invert dclk through
-        * IO_POL register.
-        * The only acceptable way to work, triple checked with scope,
-        * is using clock phase set to 0° for Negative Edge and set to 240°
-        * for Positive Edge.
-        * On A33 and similar SoCs there would be a 90° phase option,
-        * but it divides also dclk by 2.
-        * Following code is a way to avoid quirks all around TCON
-        * and DOTCLOCK drivers.
-        */
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-               clk_set_phase(tcon->dclk, 240);
-
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-               clk_set_phase(tcon->dclk, 0);
-
        regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
                           SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
                           val);
index 776c1513e582827aae0764e52e376df92e884ab6..a2bd5876c633515950f23be6cdff6f0f49306312 100644 (file)
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                 * unaligned offset is malformed and causes command stream
                 * corruption on the buffer address relocation.
                 */
-               if (offset & 3 || offset >= obj->gem.size) {
+               if (offset & 3 || offset > obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
index 2ebdc6d5a76e60a33d6a271ff158258a61b7908c..d5583190f3e44de77560d76c2d5a5a4cf7db62de 100644 (file)
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 
        if (cmd > (char *) urb->transfer_buffer) {
                /* Send partial buffer remaining before exiting */
-               int len = cmd - (char *) urb->transfer_buffer;
+               int len;
+               if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+                       *cmd++ = 0xAF;
+               len = cmd - (char *) urb->transfer_buffer;
                ret = udl_submit_urb(dev, urb, len);
                bytes_sent += len;
        } else
index 0c87b1ac6b68f0d41cfd01851a14b9a092455f4f..b992644c17e6b565b414351b1e262d4b61c9f38d 100644 (file)
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
                raw_pixels_count_byte = cmd++; /*  we'll know this later */
                raw_pixel_start = pixel;
 
-               cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
-                       min((int)(pixel_end - pixel) / bpp,
-                           (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+               cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+                                       (unsigned long)(pixel_end - pixel) / bpp,
+                                       (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
 
-               prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+               prefetch_range((void *) pixel, cmd_pixel_end - pixel);
                pixel_val16 = get_pixel_val16(pixel, bpp);
 
                while (pixel < cmd_pixel_end) {
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
                if (pixel > raw_pixel_start) {
                        /* finalize last RAW span */
                        *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+               } else {
+                       /* undo unused byte */
+                       cmd--;
                }
 
                *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
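
The min3() rewrite above bounds one RLE command by three limits at once: the per-command pixel cap, the pixels remaining on the scanline, and the command-buffer bytes remaining at two bytes per raw 16bpp pixel, less one byte kept free for the 0xAF flush marker appended in udl_handle_damage(). A standalone sketch of that clamp; the 255-pixel cap is an assumption here, for illustration only:

#include <stdio.h>

#define MAX_CMD_PIXELS	255	/* assumed per-command cap, for illustration */

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;

	return m < c ? m : c;
}

/* How many 16bpp pixels the next command may cover. */
static unsigned long cmd_span(unsigned long pixels_left,
			      unsigned long buf_bytes_left)
{
	return min3ul(MAX_CMD_PIXELS + 1UL,
		      pixels_left,
		      (buf_bytes_left - 1) / 2); /* keep 1 byte for 0xAF */
}

int main(void)
{
	/* 300 pixels left but only 41 buffer bytes: (41 - 1) / 2 = 20 pixels. */
	printf("span = %lu\n", cmd_span(300, 41));
	return 0;
}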
index 1d34619eb3fe3f57402291e7dbd05c4a193c45e1..a951ec75d01f8b1f579402512a4696af2c696533 100644 (file)
@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
                        vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
                if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
                        vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+       } else {
+               vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+               vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }
 
        vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
index f1d5f76e9c33d8f31fa4e6f79f37e27e4f43c35c..d88073e7d22dddd94b33e2210fb20181dea6d245 100644 (file)
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
                return err;
        }
 
+       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               goto skip_iommu;
+
        host->group = iommu_group_get(&pdev->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
index e2f4a4d93d2012f3d21ea59271732c84a80189a0..527a1cddb14fd5d2a23fa96559ed0e33c54bd690 100644 (file)
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
 
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+                   unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
index caa05b0702e1671c39c15edff2e3801a37e83074..5450a2db12192dc9b90e6d34da17e3c5f6bb911c 100644 (file)
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
                break;
        case V4L2_MBUS_BT656:
                csicfg->ext_vsync = 0;
-               if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+               if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+                   mbus_fmt->field == V4L2_FIELD_ALTERNATE)
                        csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
                else
                        csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
index f858cc72011d183fa11892fb152e0d9b705c3059..3942ee61bd1c17e57867a7d8e5f521b5f8eae9b8 100644 (file)
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
        }
        hdev->io_started = false;
 
+       clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
        if (!hdev->driver) {
                id = hid_match_device(hdev, hdrv);
                if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
        struct hid_device *hdev = to_hid_device(dev);
 
        if (hdev->driver == hdrv &&
-           !hdrv->match(hdev, hid_ignore_special_drivers))
+           !hdrv->match(hdev, hid_ignore_special_drivers) &&
+           !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(dev);
 
        return 0;
index 8469b6964ff64e45f7807641ef8eda8197f8f81f..b48100236df890cdd1bbffa0daac97257357a38d 100644 (file)
@@ -1154,6 +1154,8 @@ copy_rest:
                        goto out;
                if (list->tail > list->head) {
                        len = list->tail - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
                        list->head += len;
                } else {
                        len = HID_DEBUG_BUFSIZE - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
                        }
                        list->head = 0;
                        ret += len;
-                       goto copy_rest;
+                       count -= len;
+                       if (count > 0)
+                               goto copy_rest;
                }
 
        }
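
The two clamps and the reworked loop above amount to one rule: never copy more than the caller asked for, and only take the wrap-around branch again while count remains. A simplified userspace ring buffer showing the same pattern; sizes and names here are made up, not the driver's:

#include <stdio.h>
#include <string.h>

#define BUFSIZE 8

/* Copy from a ring buffer into dst, honouring both the data available
 * and the caller's count; the bug was copying 'available' unclamped. */
static size_t ring_read(const char *ring, size_t *head, size_t tail,
			char *dst, size_t count)
{
	size_t copied = 0;

	while (count > 0 && *head != tail) {
		size_t len = (tail > *head) ? tail - *head : BUFSIZE - *head;

		if (len > count)	/* the fix: clamp to what was asked */
			len = count;
		memcpy(dst + copied, ring + *head, len);
		*head = (*head + len) % BUFSIZE;
		copied += len;
		count -= len;
	}
	return copied;
}

int main(void)
{
	char ring[BUFSIZE + 1] = "abcdefgh";
	size_t head = 6;	/* data wraps: "gh" first, then "abcd" */
	char out[16] = { 0 };

	printf("%zu: %s\n", ring_read(ring, &head, 4, out, 3), out); /* 3: gha */
	return 0;
}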
index 7b8e17b03cb864a7bc0ab0cbd114594b01f28c5c..6bf4da7ad63a51f3b9aa6713552c96be6042bba2 100644 (file)
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, hammer_devices);
index a85634fe033f01bd6f9a2b41c67d027c0b55ccc1..c7981ddd8776377faa9a238b8e58d6162054b9c6 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE        0x5028
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index cb86cc834201c89f660daa3509722f6fa72cb98c..0422ec2b13d208d98acdf22c5eb97b6393c5f530 100644 (file)
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
 
 static int steam_client_ll_parse(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_parse_report(hdev, steam->hdev->dev_rdesc,
                        steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
 
 static int steam_client_ll_open(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
        int ret;
 
        ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
 
 static void steam_client_ll_close(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
                                size_t count, unsigned char report_type,
                                int reqtype)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
                        report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
                ret = PTR_ERR(steam->client_hdev);
                goto client_hdev_fail;
        }
-       hid_set_drvdata(steam->client_hdev, steam);
+       steam->client_hdev->driver_data = steam;
 
        /*
         * With the real steam controller interface, do not connect hidraw.
index c1652bb7bd156e298514bcc63ce818ea49e6d7c6..eae0cb3ddec668e8d2f82b1d571bb826b0fb1dd5 100644 (file)
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
                return;
        }
 
-       if ((ret_size > size) || (ret_size <= 2)) {
+       if ((ret_size > size) || (ret_size < 2)) {
                dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
                        __func__, size, ret_size);
                return;
index 582e449be9feeeebd5924fea4a28aab4a5c2e8a2..a2c53ea3b5edfce82eeb2ef4b4e2392f5c7fb98f 100644 (file)
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
        kfree(ishtp_dev);
 }
 
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
 
 /* 50ms to get resume response */
 #define WAIT_FOR_RESUME_ACK_MS         50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
  *
  * Return: 0 to the pm core
  */
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
        return 0;
 }
 
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
 /**
  * ish_resume() - ISH resume callback
  * @device:    device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
  *
  * Return: 0 to the pm core
  */
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
        return 0;
 }
 
-static const struct dev_pm_ops ish_pm_ops = {
-       .suspend = ish_suspend,
-       .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS       (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS       NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
 
 static struct pci_driver ish_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ish_pci_tbl,
        .probe = ish_probe,
        .remove = ish_remove,
-       .driver.pm = ISHTP_ISH_PM_OPS,
+       .driver.pm = &ish_pm_ops,
 };
 
 module_pci_driver(ish_driver);
index e3ce233f8bdcc5bdcae97ffa217f65e022938b0e..23872d08308cdb5857d53b5bcdf907e20d74c345 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/hiddev.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 #include "usbhid.h"
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                if (uref->field_index >= report->maxfield)
                        goto inval;
+               uref->field_index = array_index_nospec(uref->field_index,
+                                                      report->maxfield);
 
                field = report->field[uref->field_index];
                if (uref->usage_index >= field->maxusage)
                        goto inval;
+               uref->usage_index = array_index_nospec(uref->usage_index,
+                                                      field->maxusage);
 
                uref->usage_code = field->usage[uref->usage_index].hid;
 
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                        if (uref->field_index >= report->maxfield)
                                goto inval;
+                       uref->field_index = array_index_nospec(uref->field_index,
+                                                              report->maxfield);
 
                        field = report->field[uref->field_index];
 
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (finfo.field_index >= report->maxfield)
                        break;
+               finfo.field_index = array_index_nospec(finfo.field_index,
+                                                      report->maxfield);
 
                field = report->field[finfo.field_index];
                memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (cinfo.index >= hid->maxcollection)
                        break;
+               cinfo.index = array_index_nospec(cinfo.index,
+                                                hid->maxcollection);
 
                cinfo.type = hid->collection[cinfo.index].type;
                cinfo.usage = hid->collection[cinfo.index].usage;
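
array_index_nospec() (from linux/nospec.h) is the kernel's standard Spectre-v1 guard: after the architectural bounds check, the index is masked so that a mispredicted branch cannot carry an out-of-range value into the dependent array load. A userspace approximation of the pattern follows; the real kernel helper builds the mask without a compiler-visible comparison, so treat this sketch as illustrative only:

#include <stddef.h>
#include <stdio.h>

/*
 * Approximation of array_index_nospec(): produce an all-ones mask when
 * index < size and a zero mask otherwise, then AND it into the index.
 * The kernel version avoids letting the compiler turn this into a branch.
 */
static size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (index < size);

	return index & mask;
}

int main(void)
{
	unsigned int report[4] = { 10, 20, 30, 40 };
	size_t idx = 2;

	if (idx >= 4)
		return 1;		/* architectural bounds check */
	idx = index_nospec(idx, 4);	/* speculation-safe clamp */
	printf("%u\n", report[idx]);
	return 0;
}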
index c101369b51de88b927fdf2295f3bb664ed415899..d6797535fff97217b477cf7c2009397dcce2d1ec 100644 (file)
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
                }
        }
 
+       /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+       if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+           hdev->product == 0x0358 &&
+           WACOM_PEN_FIELD(field) &&
+           wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+               field->logical_maximum = 43200;
+       }
+
        switch (usage->hid) {
        case HID_GD_X:
                features->x_max = field->logical_maximum;
index 0bb44d0088edb5f8bd8da44a23c57095db14bb68..ad7afa74d3657d902cf655ef6f1467854625b98b 100644 (file)
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
                                features->device_type |= WACOM_DEVICETYPE_PAD;
 
-                       features->x_max = 4096;
-                       features->y_max = 4096;
+                       if (features->type == INTUOSHT2) {
+                               features->x_max = features->x_max / 10;
+                               features->y_max = features->y_max / 10;
+                       }
+                       else {
+                               features->x_max = 4096;
+                               features->y_max = 4096;
+                       }
                }
                else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
                        features->device_type |= WACOM_DEVICETYPE_PAD;
index f10840ad465c2bc468f0457bcaa8ec8bbf1c7d0f..ccf42663a908a493aaddf669d43fa207d9ca0931 100644 (file)
@@ -937,6 +937,18 @@ config SENSORS_MCP3021
          This driver can also be built as a module.  If so, the module
          will be called mcp3021.
 
+config SENSORS_MLXREG_FAN
+       tristate "Mellanox FAN driver"
+       depends on MELLANOX_PLATFORM
+       imply THERMAL
+       select REGMAP
+       help
+         This option enables support for the FAN control on Mellanox
+         Ethernet and InfiniBand switches. The driver is activated through
+         a platform device add call. Say Y to enable it. To compile this
+         driver as a module, choose 'M' here: the module will be called
+         mlxreg-fan.
+
 config SENSORS_TC654
        tristate "Microchip TC654/TC655 and compatibles"
        depends on I2C
@@ -1256,6 +1268,16 @@ config SENSORS_NCT7904
          This driver can also be built as a module.  If so, the module
          will be called nct7904.
 
+config SENSORS_NPCM7XX
+       tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers"
+       imply THERMAL
+       help
+         This driver provides support for Nuvoton NPCM750/730/715/705 PWM
+         and Fan controllers.
+
+         This driver can also be built as a module. If so, the module
+         will be called npcm750-pwm-fan.
+
 config SENSORS_NSA320
        tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
        depends on GPIOLIB && OF
index e7d52a36e6c4f1dcea766a35d8ffa2bee1d7ae8e..842c92f83ce62875e05b07e0bf7835fd3f3da6cd 100644 (file)
@@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790)    += max31790.o
 obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
 obj-$(CONFIG_SENSORS_MCP3021)  += mcp3021.o
 obj-$(CONFIG_SENSORS_TC654)    += tc654.o
+obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
 obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
 obj-$(CONFIG_SENSORS_NCT6683)  += nct6683.o
 obj-$(CONFIG_SENSORS_NCT6775)  += nct6775.o
 obj-$(CONFIG_SENSORS_NCT7802)  += nct7802.o
 obj-$(CONFIG_SENSORS_NCT7904)  += nct7904.o
+obj-$(CONFIG_SENSORS_NPCM7XX)  += npcm750-pwm-fan.o
 obj-$(CONFIG_SENSORS_NSA320)   += nsa320-hwmon.o
 obj-$(CONFIG_SENSORS_NTC_THERMISTOR)   += ntc_thermistor.o
 obj-$(CONFIG_SENSORS_PC87360)  += pc87360.o
index 9ef84998c7f382b114cd11d1c4ea04e7c916dda4..90837f7c7d0f3203ee64de86dcccd8a1e7cc26ad 100644 (file)
@@ -194,8 +194,7 @@ struct adt7475_data {
        struct mutex lock;
 
        unsigned long measure_updated;
-       unsigned long limits_updated;
-       char valid;
+       bool valid;
 
        u8 config4;
        u8 config5;
@@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
        unsigned short val;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        switch (sattr->nr) {
        case ALARM:
                return sprintf(buf, "%d\n",
@@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
        int out;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        switch (sattr->nr) {
        case HYSTERSIS:
                mutex_lock(&data->lock);
@@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
        int out, val;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        mutex_lock(&data->lock);
        out = (data->range[sattr->index] >> 4) & 0x0F;
        val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
@@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
        int out;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        if (sattr->nr == ALARM)
                out = (data->alarms >> (sattr->index + 10)) & 1;
        else
@@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
        struct adt7475_data *data = adt7475_update_device(dev);
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
 }
 
@@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
        struct adt7475_data *data = adt7475_update_device(dev);
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
 }
 
@@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
        struct adt7475_data *data = adt7475_update_device(dev);
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
 }
 
@@ -945,6 +965,11 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
-       int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-                         ARRAY_SIZE(pwmfreq_table) - 1);
+       int i;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       i = clamp_val(data->range[sattr->index] & 0xf, 0,
+                     ARRAY_SIZE(pwmfreq_table) - 1);
+
        return sprintf(buf, "%d\n", pwmfreq_table[i]);
 }
 
@@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
                             struct device_attribute *devattr, char *buf)
 {
        struct adt7475_data *data = adt7475_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 
@@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client,
                sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
 }
 
+static int adt7475_update_limits(struct i2c_client *client)
+{
+       struct adt7475_data *data = i2c_get_clientdata(client);
+       int i;
+       int ret;
+
+       ret = adt7475_read(REG_CONFIG4);
+       if (ret < 0)
+               return ret;
+       data->config4 = ret;
+
+       ret = adt7475_read(REG_CONFIG5);
+       if (ret < 0)
+               return ret;
+       data->config5 = ret;
+
+       for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+               if (!(data->has_voltage & (1 << i)))
+                       continue;
+               /* Adjust values so they match the input precision */
+               ret = adt7475_read(VOLTAGE_MIN_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->voltage[MIN][i] = ret << 2;
+
+               ret = adt7475_read(VOLTAGE_MAX_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->voltage[MAX][i] = ret << 2;
+       }
+
+       if (data->has_voltage & (1 << 5)) {
+               ret = adt7475_read(REG_VTT_MIN);
+               if (ret < 0)
+                       return ret;
+               data->voltage[MIN][5] = ret << 2;
+
+               ret = adt7475_read(REG_VTT_MAX);
+               if (ret < 0)
+                       return ret;
+               data->voltage[MAX][5] = ret << 2;
+       }
+
+       for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+               /* Adjust values so they match the input precision */
+               ret = adt7475_read(TEMP_MIN_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[MIN][i] = ret << 2;
+
+               ret = adt7475_read(TEMP_MAX_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[MAX][i] = ret << 2;
+
+               ret = adt7475_read(TEMP_TMIN_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[AUTOMIN][i] = ret << 2;
+
+               ret = adt7475_read(TEMP_THERM_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[THERM][i] = ret << 2;
+
+               ret = adt7475_read(TEMP_OFFSET_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[OFFSET][i] = ret;
+       }
+       adt7475_read_hystersis(client);
+
+       for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+               if (i == 3 && !data->has_fan4)
+                       continue;
+               ret = adt7475_read_word(client, TACH_MIN_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->tach[MIN][i] = ret;
+       }
+
+       for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+               if (i == 1 && !data->has_pwm2)
+                       continue;
+               ret = adt7475_read(PWM_MAX_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->pwm[MAX][i] = ret;
+
+               ret = adt7475_read(PWM_MIN_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->pwm[MIN][i] = ret;
+               /* Set the channel and control information */
+               adt7475_read_pwm(client, i);
+       }
+
+       ret = adt7475_read(TEMP_TRANGE_REG(0));
+       if (ret < 0)
+               return ret;
+       data->range[0] = ret;
+
+       ret = adt7475_read(TEMP_TRANGE_REG(1));
+       if (ret < 0)
+               return ret;
+       data->range[1] = ret;
+
+       ret = adt7475_read(TEMP_TRANGE_REG(2));
+       if (ret < 0)
+               return ret;
+       data->range[2] = ret;
+
+       return 0;
+}
+
 static int adt7475_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
@@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client,
                         (data->bypass_attn & (1 << 3)) ? " in3" : "",
                         (data->bypass_attn & (1 << 4)) ? " in4" : "");
 
+       /* Limits and settings should never change; update them only once */
+       ret = adt7475_update_limits(client);
+       if (ret)
+               goto eremove;
+
        return 0;
 
 eremove:
@@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
        }
 }
 
-static struct adt7475_data *adt7475_update_device(struct device *dev)
+static int adt7475_update_measure(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7475_data *data = i2c_get_clientdata(client);
        u16 ext;
        int i;
+       int ret;
 
-       mutex_lock(&data->lock);
+       ret = adt7475_read(REG_STATUS2);
+       if (ret < 0)
+               return ret;
+       data->alarms = ret << 8;
 
-       /* Measurement values update every 2 seconds */
-       if (time_after(jiffies, data->measure_updated + HZ * 2) ||
-           !data->valid) {
-               data->alarms = adt7475_read(REG_STATUS2) << 8;
-               data->alarms |= adt7475_read(REG_STATUS1);
-
-               ext = (adt7475_read(REG_EXTEND2) << 8) |
-                       adt7475_read(REG_EXTEND1);
-               for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
-                       if (!(data->has_voltage & (1 << i)))
-                               continue;
-                       data->voltage[INPUT][i] =
-                               (adt7475_read(VOLTAGE_REG(i)) << 2) |
-                               ((ext >> (i * 2)) & 3);
-               }
+       ret = adt7475_read(REG_STATUS1);
+       if (ret < 0)
+               return ret;
+       data->alarms |= ret;
 
-               for (i = 0; i < ADT7475_TEMP_COUNT; i++)
-                       data->temp[INPUT][i] =
-                               (adt7475_read(TEMP_REG(i)) << 2) |
-                               ((ext >> ((i + 5) * 2)) & 3);
+       ret = adt7475_read(REG_EXTEND2);
+       if (ret < 0)
+               return ret;
 
-               if (data->has_voltage & (1 << 5)) {
-                       data->alarms |= adt7475_read(REG_STATUS4) << 24;
-                       ext = adt7475_read(REG_EXTEND3);
-                       data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
-                               ((ext >> 4) & 3);
-               }
+       ext = (ret << 8);
 
-               for (i = 0; i < ADT7475_TACH_COUNT; i++) {
-                       if (i == 3 && !data->has_fan4)
-                               continue;
-                       data->tach[INPUT][i] =
-                               adt7475_read_word(client, TACH_REG(i));
-               }
+       ret = adt7475_read(REG_EXTEND1);
+       if (ret < 0)
+               return ret;
 
-               /* Updated by hw when in auto mode */
-               for (i = 0; i < ADT7475_PWM_COUNT; i++) {
-                       if (i == 1 && !data->has_pwm2)
-                               continue;
-                       data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
-               }
+       ext |= ret;
+
+       for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+               if (!(data->has_voltage & (1 << i)))
+                       continue;
+               ret = adt7475_read(VOLTAGE_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->voltage[INPUT][i] =
+                       (ret << 2) |
+                       ((ext >> (i * 2)) & 3);
+       }
 
-               if (data->has_vid)
-                       data->vid = adt7475_read(REG_VID) & 0x3f;
+       for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+               ret = adt7475_read(TEMP_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->temp[INPUT][i] =
+                       (ret << 2) |
+                       ((ext >> ((i + 5) * 2)) & 3);
+       }
 
-               data->measure_updated = jiffies;
+       if (data->has_voltage & (1 << 5)) {
+               ret = adt7475_read(REG_STATUS4);
+               if (ret < 0)
+                       return ret;
+               data->alarms |= ret << 24;
+
+               ret = adt7475_read(REG_EXTEND3);
+               if (ret < 0)
+                       return ret;
+               ext = ret;
+
+               ret = adt7475_read(REG_VTT);
+               if (ret < 0)
+                       return ret;
+               data->voltage[INPUT][5] = ret << 2 |
+                       ((ext >> 4) & 3);
        }
 
-       /* Limits and settings, should never change update every 60 seconds */
-       if (time_after(jiffies, data->limits_updated + HZ * 60) ||
-           !data->valid) {
-               data->config4 = adt7475_read(REG_CONFIG4);
-               data->config5 = adt7475_read(REG_CONFIG5);
-
-               for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
-                       if (!(data->has_voltage & (1 << i)))
-                               continue;
-                       /* Adjust values so they match the input precision */
-                       data->voltage[MIN][i] =
-                               adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
-                       data->voltage[MAX][i] =
-                               adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
-               }
+       for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+               if (i == 3 && !data->has_fan4)
+                       continue;
+               ret = adt7475_read_word(client, TACH_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->tach[INPUT][i] = ret;
+       }
 
-               if (data->has_voltage & (1 << 5)) {
-                       data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
-                       data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
-               }
+       /* Updated by hw when in auto mode */
+       for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+               if (i == 1 && !data->has_pwm2)
+                       continue;
+               ret = adt7475_read(PWM_REG(i));
+               if (ret < 0)
+                       return ret;
+               data->pwm[INPUT][i] = ret;
+       }
 
-               for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
-                       /* Adjust values so they match the input precision */
-                       data->temp[MIN][i] =
-                               adt7475_read(TEMP_MIN_REG(i)) << 2;
-                       data->temp[MAX][i] =
-                               adt7475_read(TEMP_MAX_REG(i)) << 2;
-                       data->temp[AUTOMIN][i] =
-                               adt7475_read(TEMP_TMIN_REG(i)) << 2;
-                       data->temp[THERM][i] =
-                               adt7475_read(TEMP_THERM_REG(i)) << 2;
-                       data->temp[OFFSET][i] =
-                               adt7475_read(TEMP_OFFSET_REG(i));
-               }
-               adt7475_read_hystersis(client);
+       if (data->has_vid) {
+               ret = adt7475_read(REG_VID);
+               if (ret < 0)
+                       return ret;
+               data->vid = ret & 0x3f;
+       }
 
-               for (i = 0; i < ADT7475_TACH_COUNT; i++) {
-                       if (i == 3 && !data->has_fan4)
-                               continue;
-                       data->tach[MIN][i] =
-                               adt7475_read_word(client, TACH_MIN_REG(i));
-               }
+       return 0;
+}
 
-               for (i = 0; i < ADT7475_PWM_COUNT; i++) {
-                       if (i == 1 && !data->has_pwm2)
-                               continue;
-                       data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
-                       data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
-                       /* Set the channel and control information */
-                       adt7475_read_pwm(client, i);
-               }
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct adt7475_data *data = i2c_get_clientdata(client);
+       int ret;
 
-               data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
-               data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
-               data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+       mutex_lock(&data->lock);
 
-               data->limits_updated = jiffies;
-               data->valid = 1;
+       /* Measurement values update every 2 seconds */
+       if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+           !data->valid) {
+               ret = adt7475_update_measure(dev);
+               if (ret) {
+                       data->valid = false;
+                       mutex_unlock(&data->lock);
+                       return ERR_PTR(ret);
+               }
+               data->measure_updated = jiffies;
+               data->valid = true;
        }
 
        mutex_unlock(&data->lock);
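
The refactor converts adt7475_update_device() from a best-effort helper into one that reports I2C failures, and the show_*() callbacks above bail out with IS_ERR()/PTR_ERR(). A minimal userspace mock of that kernel convention (ERR_PTR()/IS_ERR()/PTR_ERR() live in linux/err.h; everything else here is illustrative):

#include <errno.h>
#include <stdio.h>

/* Mock of the kernel's ERR_PTR() convention: errors travel in the
 * pointer itself, so callers need no separate out-parameter. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

struct data { int alarms; };

static struct data *update_device(int fail)
{
	static struct data d = { .alarms = 0x10 };

	if (fail)
		return ERR_PTR(-EIO);	/* propagate the bus error */
	return &d;
}

int main(void)
{
	struct data *d = update_device(0);

	if (IS_ERR(d))
		return (int)-PTR_ERR(d);
	printf("alarms = %#x\n", d->alarms);
	return 0;
}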
index bf3bb7e1adab8579cf7647a24cdba12c9c828050..9d3ef879dc51e1aa08848649cfaec435a6f882fa 100644 (file)
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
        },
+       {
+               .ident = "Dell XPS13 9333",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+               },
+       },
        { }
 };
 
index 1ea7ca510f84a3ca74e75464a4f95d10a22262ef..aaebeb726d6ad0c02109c3b4031cd35d9f13c512 100644 (file)
@@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client,
        switch (id->driver_data) {
        case emc1404:
                data->groups[2] = &emc1404_group;
+               /* fall through */
        case emc1403:
                data->groups[1] = &emc1403_group;
+               /* fall through */
        case emc1402:
                data->groups[0] = &emc1402_group;
        }
index 69031a0f7ed2c5ff44df47e102fbd7eacea3a1fc..2f3f875c06ac174a973233bb607da7dec7086c57 100644 (file)
@@ -22,7 +22,6 @@
  * struct iio_hwmon_state - device instance state
  * @channels:          filled with array of channels from iio
  * @num_channels:      number of channels in channels (saves counting twice)
- * @hwmon_dev:         associated hwmon device
  * @attr_group:                the group of attributes
  * @groups:            null terminated array of attribute groups
  * @attrs:             null terminated array of attribute pointers.
@@ -30,7 +29,6 @@
 struct iio_hwmon_state {
        struct iio_channel *channels;
        int num_channels;
-       struct device *hwmon_dev;
        struct attribute_group attr_group;
        const struct attribute_group *groups[2];
        struct attribute **attrs;
@@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
        enum iio_chan_type type;
        struct iio_channel *channels;
        const char *name = "iio_hwmon";
+       struct device *hwmon_dev;
        char *sname;
 
        if (dev->of_node && dev->of_node->name)
                name = dev->of_node->name;
 
-       channels = iio_channel_get_all(dev);
+       channels = devm_iio_channel_get_all(dev);
        if (IS_ERR(channels)) {
                if (PTR_ERR(channels) == -ENODEV)
                        return -EPROBE_DEFER;
@@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
        }
 
        st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
-       if (st == NULL) {
-               ret = -ENOMEM;
-               goto error_release_channels;
-       }
+       if (st == NULL)
+               return -ENOMEM;
 
        st->channels = channels;
 
@@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
        st->attrs = devm_kcalloc(dev,
                                 st->num_channels + 1, sizeof(*st->attrs),
                                 GFP_KERNEL);
-       if (st->attrs == NULL) {
-               ret = -ENOMEM;
-               goto error_release_channels;
-       }
+       if (st->attrs == NULL)
+               return -ENOMEM;
 
        for (i = 0; i < st->num_channels; i++) {
                a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
-               if (a == NULL) {
-                       ret = -ENOMEM;
-                       goto error_release_channels;
-               }
+               if (a == NULL)
+                       return -ENOMEM;
 
                sysfs_attr_init(&a->dev_attr.attr);
                ret = iio_get_channel_type(&st->channels[i], &type);
                if (ret < 0)
-                       goto error_release_channels;
+                       return ret;
 
                switch (type) {
                case IIO_VOLTAGE:
@@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
                                                               humidity_i++);
                        break;
                default:
-                       ret = -EINVAL;
-                       goto error_release_channels;
-               }
-               if (a->dev_attr.attr.name == NULL) {
-                       ret = -ENOMEM;
-                       goto error_release_channels;
+                       return -EINVAL;
                }
+               if (a->dev_attr.attr.name == NULL)
+                       return -ENOMEM;
+
                a->dev_attr.show = iio_hwmon_read_val;
                a->dev_attr.attr.mode = S_IRUGO;
                a->index = i;
@@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
        st->groups[0] = &st->attr_group;
 
        sname = devm_kstrdup(dev, name, GFP_KERNEL);
-       if (!sname) {
-               ret = -ENOMEM;
-               goto error_release_channels;
-       }
+       if (!sname)
+               return -ENOMEM;
 
        strreplace(sname, '-', '_');
-       st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
-                                                         st->groups);
-       if (IS_ERR(st->hwmon_dev)) {
-               ret = PTR_ERR(st->hwmon_dev);
-               goto error_release_channels;
-       }
-       platform_set_drvdata(pdev, st);
-       return 0;
-
-error_release_channels:
-       iio_channel_release_all(channels);
-       return ret;
-}
-
-static int iio_hwmon_remove(struct platform_device *pdev)
-{
-       struct iio_hwmon_state *st = platform_get_drvdata(pdev);
-
-       hwmon_device_unregister(st->hwmon_dev);
-       iio_channel_release_all(st->channels);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
+                                                          st->groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct of_device_id iio_hwmon_of_match[] = {
@@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = {
                .of_match_table = iio_hwmon_of_match,
        },
        .probe = iio_hwmon_probe,
-       .remove = iio_hwmon_remove,
 };
 
 module_platform_driver(iio_hwmon_driver);
index 17c6460ae35129de67bbb51be5a5e8ecff3efb28..bb15d7816a294f0fd3c9b99d4bc04195e374698e 100644 (file)
@@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen 7 1700X", 20000 },
        { 0x17, "AMD Ryzen 7 1800X", 20000 },
        { 0x17, "AMD Ryzen 7 2700X", 10000 },
-       { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
-       { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
-       { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
-       { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
-       { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
-       { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+       { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
+       { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
 };
 
 static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
new file mode 100644 (file)
index 0000000..de46577
--- /dev/null
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define MLXREG_FAN_MAX_TACHO           12
+#define MLXREG_FAN_MAX_STATE           10
+#define MLXREG_FAN_MIN_DUTY            51      /* 20% */
+#define MLXREG_FAN_MAX_DUTY            255     /* 100% */
+/*
+ * Minimum and maximum allowed FAN speed in percent: from 20% to 100%. Values
+ * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the FAN speed dynamic minimum. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10 to introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60, 70,
+ * 80, 90, 100.
+ */
+#define MLXREG_FAN_SPEED_MIN                   (MLXREG_FAN_MAX_STATE + 2)
+#define MLXREG_FAN_SPEED_MAX                   (MLXREG_FAN_MAX_STATE * 2)
+#define MLXREG_FAN_SPEED_MIN_LEVEL             2       /* 20 percent */
+#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
+#define MLXREG_FAN_TACHO_DIVIDER_DEF           1132
+/*
+ * The FAN datasheet defines the formula for RPM calculation as RPM = 15/t-high.
+ * The logic in a programmable device measures the time t-high by sampling the
+ * tachometer every t-sample (with the default value 11.32 uS) and increments
+ * a counter (N) as long as the pulse has not changed:
+ * RPM = 15 / (t-sample * (K + Regval)), where:
+ * Regval: is the value read from the programmable device register;
+ *  - 0xff - represents tachometer fault;
+ *  - 0xfe - represents tachometer minimum value, which is 4444 RPM;
+ *  - 0x00 - represents tachometer maximum value, which is 30115 RPM;
+ * K: is 44 and represents the minimum allowed samples per pulse;
+ * N: is equal to K + Regval.
+ * In order to calculate RPM from the register value the following formula is
+ * used: RPM = 15 / ((Regval + K) * 11.32 * 10^(-6)), which in the
+ * default case simplifies to:
+ * RPM = 15000000 * 100 / ((Regval + 44) * 1132);
+ * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115;
+ * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446;
+ * In the common case the formula generalizes to:
+ * RPM = 15000000 * 100 / ((Regval + samples) * divider).
+ */
+#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
+                                        ((rval) + (s)) * (d)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_PWM_DUTY2STATE(duty)        (DIV_ROUND_CLOSEST((duty) *     \
+                                        MLXREG_FAN_MAX_STATE,          \
+                                        MLXREG_FAN_MAX_DUTY))
+#define MLXREG_FAN_PWM_STATE2DUTY(stat)        (DIV_ROUND_CLOSEST((stat) *     \
+                                        MLXREG_FAN_MAX_DUTY,           \
+                                        MLXREG_FAN_MAX_STATE))
+
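
Plugging the defaults from the comment above (samples = 44, divider = 1132) into these macros gives a quick sanity check. The standalone sketch below re-derives the bracketing RPM values and the duty/state round trip; note DIV_ROUND_CLOSEST rounds to nearest, so the results land within one of the plain-division figures quoted in the comment:

#include <stdio.h>

#define MLXREG_FAN_MAX_STATE	10
#define MLXREG_FAN_MAX_DUTY	255

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

#define GET_RPM(rval, d, s)	DIV_ROUND_CLOSEST(15000000UL * 100, \
					((rval) + (s)) * (d))
#define DUTY2STATE(duty)	DIV_ROUND_CLOSEST((duty) * MLXREG_FAN_MAX_STATE, \
					MLXREG_FAN_MAX_DUTY)
#define STATE2DUTY(stat)	DIV_ROUND_CLOSEST((stat) * MLXREG_FAN_MAX_DUTY, \
					MLXREG_FAN_MAX_STATE)

int main(void)
{
	/* Regval 0x00 and 0xfe bracket the measurable range. */
	printf("rpm max = %lu\n", GET_RPM(0x00, 1132, 44));	/* 30116 */
	printf("rpm min = %lu\n", GET_RPM(0xfe, 1132, 44));	/* 4447  */
	/* Duty 51 (20%) maps to state 2 and back to duty 51. */
	printf("state = %d, duty = %d\n", DUTY2STATE(51), STATE2DUTY(2));
	return 0;
}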
+/*
+ * struct mlxreg_fan_tacho - tachometer data (internal use):
+ *
+ * @connected: indicates if tachometer is connected;
+ * @reg: register offset;
+ * @mask: fault mask;
+ */
+struct mlxreg_fan_tacho {
+       bool connected;
+       u32 reg;
+       u32 mask;
+};
+
+/*
+ * struct mlxreg_fan_pwm - PWM data (internal use):
+ *
+ * @connected: indicates if PWM is connected;
+ * @reg: register offset;
+ */
+struct mlxreg_fan_pwm {
+       bool connected;
+       u32 reg;
+};
+
+/*
+ * struct mlxreg_fan - private data (internal use):
+ *
+ * @dev: basic device;
+ * @regmap: register map of parent device;
+ * @tacho: tachometer data;
+ * @pwm: PWM data;
+ * @samples: minimum allowed samples per pulse;
+ * @divider: divider value for tachometer RPM calculation;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
+ */
+struct mlxreg_fan {
+       struct device *dev;
+       void *regmap;
+       struct mlxreg_core_platform_data *pdata;
+       struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
+       struct mlxreg_fan_pwm pwm;
+       int samples;
+       int divider;
+       u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+       struct thermal_cooling_device *cdev;
+};
+
+static int
+mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+               int channel, long *val)
+{
+       struct mlxreg_fan *fan = dev_get_drvdata(dev);
+       struct mlxreg_fan_tacho *tacho;
+       u32 regval;
+       int err;
+
+       switch (type) {
+       case hwmon_fan:
+               tacho = &fan->tacho[channel];
+               switch (attr) {
+               case hwmon_fan_input:
+                       err = regmap_read(fan->regmap, tacho->reg, &regval);
+                       if (err)
+                               return err;
+
+                       *val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+                                                 fan->samples);
+                       break;
+
+               case hwmon_fan_fault:
+                       err = regmap_read(fan->regmap, tacho->reg, &regval);
+                       if (err)
+                               return err;
+
+                       *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask);
+                       break;
+
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+
+       case hwmon_pwm:
+               switch (attr) {
+               case hwmon_pwm_input:
+                       err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+                       if (err)
+                               return err;
+
+                       *val = regval;
+                       break;
+
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int
+mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+                int channel, long val)
+{
+       struct mlxreg_fan *fan = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_pwm:
+               switch (attr) {
+               case hwmon_pwm_input:
+                       if (val < MLXREG_FAN_MIN_DUTY ||
+                           val > MLXREG_FAN_MAX_DUTY)
+                               return -EINVAL;
+                       return regmap_write(fan->regmap, fan->pwm.reg, val);
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static umode_t
+mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+                     int channel)
+{
+       switch (type) {
+       case hwmon_fan:
+               if (!(((struct mlxreg_fan *)data)->tacho[channel].connected))
+                       return 0;
+
+               switch (attr) {
+               case hwmon_fan_input:
+               case hwmon_fan_fault:
+                       return 0444;
+               default:
+                       break;
+               }
+               break;
+
+       case hwmon_pwm:
+               if (!(((struct mlxreg_fan *)data)->pwm.connected))
+                       return 0;
+
+               switch (attr) {
+               case hwmon_pwm_input:
+                       return 0644;
+               default:
+                       break;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static const u32 mlxreg_fan_hwmon_fan_config[] = {
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       HWMON_F_INPUT | HWMON_F_FAULT,
+       0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
+       .type = hwmon_fan,
+       .config = mlxreg_fan_hwmon_fan_config,
+};
+
+static const u32 mlxreg_fan_hwmon_pwm_config[] = {
+       HWMON_PWM_INPUT,
+       0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
+       .type = hwmon_pwm,
+       .config = mlxreg_fan_hwmon_pwm_config,
+};
+
+static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
+       &mlxreg_fan_hwmon_fan,
+       &mlxreg_fan_hwmon_pwm,
+       NULL
+};
+
+static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = {
+       .is_visible = mlxreg_fan_is_visible,
+       .read = mlxreg_fan_read,
+       .write = mlxreg_fan_write,
+};
+
+static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = {
+       .ops = &mlxreg_fan_hwmon_hwmon_ops,
+       .info = mlxreg_fan_hwmon_info,
+};
+
+static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev,
+                                   unsigned long *state)
+{
+       *state = MLXREG_FAN_MAX_STATE;
+       return 0;
+}
+
+static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+                                   unsigned long *state)
+{
+       struct mlxreg_fan *fan = cdev->devdata;
+       u32 regval;
+       int err;
+
+       err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+       if (err) {
+               dev_err(fan->dev, "Failed to query PWM duty\n");
+               return err;
+       }
+
+       *state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+
+       return 0;
+}
+
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+                                   unsigned long state)
+{
+       struct mlxreg_fan *fan = cdev->devdata;
+       unsigned long cur_state;
+       u32 regval;
+       int i;
+       int err;
+
+       /*
+        * Verify whether this request changes the allowed dynamical FAN
+        * minimum. If it does, update the cooling levels accordingly and,
+        * if the current state falls below the newly requested minimum,
+        * raise it to that minimum. For example, when the minimum state is
+        * changed from 4 to 6, fan->cooling_levels[0..5] are all set to 6,
+        * so a current state of 5 is re-mapped through
+        * fan->cooling_levels[5] and written back as 6.
+        */
+       if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+               state -= MLXREG_FAN_MAX_STATE;
+               for (i = 0; i < state; i++)
+                       fan->cooling_levels[i] = state;
+               for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
+                       fan->cooling_levels[i] = i;
+
+               err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+               if (err) {
+                       dev_err(fan->dev, "Failed to query PWM duty\n");
+                       return err;
+               }
+
+               cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+               if (state < cur_state)
+                       return 0;
+
+               state = cur_state;
+       }
+
+       if (state > MLXREG_FAN_MAX_STATE)
+               return -EINVAL;
+
+       /* Normalize the state to the valid speed range. */
+       state = fan->cooling_levels[state];
+       err = regmap_write(fan->regmap, fan->pwm.reg,
+                          MLXREG_FAN_PWM_STATE2DUTY(state));
+       if (err) {
+               dev_err(fan->dev, "Failed to write PWM duty\n");
+               return err;
+       }
+       return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+       .get_max_state  = mlxreg_fan_get_max_state,
+       .get_cur_state  = mlxreg_fan_get_cur_state,
+       .set_cur_state  = mlxreg_fan_set_cur_state,
+};
+
+static int mlxreg_fan_config(struct mlxreg_fan *fan,
+                            struct mlxreg_core_platform_data *pdata)
+{
+       struct mlxreg_core_data *data = pdata->data;
+       bool configured = false;
+       int tacho_num = 0, i;
+
+       fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
+       fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
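+       /*
+        * Platform data entries are matched by label below: "tacho*"
+        * entries describe tachometer registers, a single "pwm" entry
+        * describes the PWM register, and an optional "conf" entry
+        * overrides the default samples and divider values set above.
+        */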
+       for (i = 0; i < pdata->counter; i++, data++) {
+               if (strnstr(data->label, "tacho", sizeof(data->label))) {
+                       if (tacho_num == MLXREG_FAN_MAX_TACHO) {
+                               dev_err(fan->dev, "too many tacho entries: %s\n",
+                                       data->label);
+                               return -EINVAL;
+                       }
+                       fan->tacho[tacho_num].reg = data->reg;
+                       fan->tacho[tacho_num].mask = data->mask;
+                       fan->tacho[tacho_num++].connected = true;
+               } else if (strnstr(data->label, "pwm", sizeof(data->label))) {
+                       if (fan->pwm.connected) {
+                               dev_err(fan->dev, "duplicate pwm entry: %s\n",
+                                       data->label);
+                               return -EINVAL;
+                       }
+                       fan->pwm.reg = data->reg;
+                       fan->pwm.connected = true;
+               } else if (strnstr(data->label, "conf", sizeof(data->label))) {
+                       if (configured) {
+                               dev_err(fan->dev, "duplicate conf entry: %s\n",
+                                       data->label);
+                               return -EINVAL;
+                       }
+                       /* Validate that conf parameters are not zero. */
+                       if (!data->mask || !data->bit) {
+                               dev_err(fan->dev, "invalid conf entry params: %s\n",
+                                       data->label);
+                               return -EINVAL;
+                       }
+                       fan->samples = data->mask;
+                       fan->divider = data->bit;
+                       configured = true;
+               } else {
+                       dev_err(fan->dev, "invalid label: %s\n", data->label);
+                       return -EINVAL;
+               }
+       }
+
+       /* Init cooling levels per PWM state. */
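+       /*
+        * For example, assuming MLXREG_FAN_SPEED_MIN_LEVEL is 2 (the
+        * actual value is defined with the other constants in this file),
+        * the table becomes {2, 2, 3, 4, ..., MLXREG_FAN_MAX_STATE}, so
+        * requests below the minimum level are clamped up to it.
+        */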
+       for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
+               fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
+       for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
+               fan->cooling_levels[i] = i;
+
+       return 0;
+}
+
+static int mlxreg_fan_probe(struct platform_device *pdev)
+{
+       struct mlxreg_core_platform_data *pdata;
+       struct mlxreg_fan *fan;
+       struct device *hwm;
+       int err;
+
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "Failed to get platform data.\n");
+               return -EINVAL;
+       }
+
+       fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+       if (!fan)
+               return -ENOMEM;
+
+       fan->dev = &pdev->dev;
+       fan->regmap = pdata->regmap;
+       platform_set_drvdata(pdev, fan);
+
+       err = mlxreg_fan_config(fan, pdata);
+       if (err)
+               return err;
+
+       hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+                                                  fan,
+                                                  &mlxreg_fan_hwmon_chip_info,
+                                                  NULL);
+       if (IS_ERR(hwm)) {
+               dev_err(&pdev->dev, "Failed to register hwmon device\n");
+               return PTR_ERR(hwm);
+       }
+
+       if (IS_REACHABLE(CONFIG_THERMAL)) {
+               fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
+                                               &mlxreg_fan_cooling_ops);
+               if (IS_ERR(fan->cdev)) {
+                       dev_err(&pdev->dev, "Failed to register cooling device\n");
+                       return PTR_ERR(fan->cdev);
+               }
+       }
+
+       return 0;
+}
+
+static int mlxreg_fan_remove(struct platform_device *pdev)
+{
+       struct mlxreg_fan *fan = platform_get_drvdata(pdev);
+
+       if (IS_REACHABLE(CONFIG_THERMAL))
+               thermal_cooling_device_unregister(fan->cdev);
+
+       return 0;
+}
+
+static struct platform_driver mlxreg_fan_driver = {
+       .driver = {
+           .name = "mlxreg-fan",
+       },
+       .probe = mlxreg_fan_probe,
+       .remove = mlxreg_fan_remove,
+};
+
+module_platform_driver(mlxreg_fan_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-fan");
index 155d4d1d1585af4aa7debc37163072af4979bd02..c6bd61e4695abc01a7ed909f1d27f6944c45e246 100644 (file)
@@ -1050,8 +1050,8 @@ struct nct6775_data {
        u64 beeps;
 
        u8 pwm_num;     /* number of pwm */
-       u8 pwm_mode[NUM_FAN];   /* 1->DC variable voltage,
-                                * 0->PWM variable duty cycle
+       u8 pwm_mode[NUM_FAN];   /* 0->DC variable voltage,
+                                * 1->PWM variable duty cycle
                                 */
        enum pwm_enable pwm_enable[NUM_FAN];
                        /* 0->off
@@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
        case thermal_cruise:
                nct6775_write_value(data, data->REG_TARGET[nr],
                                    data->target_temp[nr]);
-               /* intentional */
+               /* fall through */
        default:
                reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
                reg = (reg & ~data->tolerance_mask) |
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
         * The temperature is already monitored if the respective bit in <mask>
         * is set.
         */
-       for (i = 0; i < 32; i++) {
+       for (i = 0; i < 31; i++) {
                if (!(data->temp_mask & BIT(i + 1)))
                        continue;
                if (!reg_temp_alternate[i])
index 95a68ab175c7f27ab60e8a26ab0e0cf9029d6e67..7815ddf149f60864f83b0c51986dd252415eb7d1 100644 (file)
@@ -77,7 +77,7 @@ struct nct7904_data {
 };
 
 /* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank)
+static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
 {
        int ret;
 
@@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data)
 
 /* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
 static int nct7904_read_reg(struct nct7904_data *data,
-                           unsigned bank, unsigned reg)
+                           unsigned int bank, unsigned int reg)
 {
        struct i2c_client *client = data->client;
        int ret;
@@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data,
  * -ERRNO on error.
  */
 static int nct7904_read_reg16(struct nct7904_data *data,
-                             unsigned bank, unsigned reg)
+                             unsigned int bank, unsigned int reg)
 {
        struct i2c_client *client = data->client;
        int ret, hi;
@@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data,
 
 /* Write 1-byte register. Returns 0 or -ERRNO on error. */
 static int nct7904_write_reg(struct nct7904_data *data,
-                            unsigned bank, unsigned reg, u8 val)
+                            unsigned int bank, unsigned int reg, u8 val)
 {
        struct i2c_client *client = data->client;
        int ret;
@@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
        unsigned int cnt, rpm;
        int ret;
 
-       switch(attr) {
+       switch (attr) {
        case hwmon_fan_input:
                ret = nct7904_read_reg16(data, BANK_0,
                                         FANIN1_HV_REG + channel * 2);
@@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
 
        index = nct7904_chan_to_index[channel];
 
-       switch(attr) {
+       switch (attr) {
        case hwmon_in_input:
                ret = nct7904_read_reg16(data, BANK_0,
                                         VSEN1_HV_REG + index * 2);
@@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
        struct nct7904_data *data = dev_get_drvdata(dev);
        int ret, temp;
 
-       switch(attr) {
+       switch (attr) {
        case hwmon_temp_input:
                if (channel == 0)
                        ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
@@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
        struct nct7904_data *data = dev_get_drvdata(dev);
        int ret;
 
-       switch(attr) {
+       switch (attr) {
        case hwmon_pwm_input:
                ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
                if (ret < 0)
@@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
        struct nct7904_data *data = dev_get_drvdata(dev);
        int ret;
 
-       switch(attr) {
+       switch (attr) {
        case hwmon_pwm_input:
                if (val < 0 || val > 255)
                        return -EINVAL;
@@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
 
 static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
 {
-       switch(attr) {
+       switch (attr) {
        case hwmon_pwm_input:
        case hwmon_pwm_enable:
                return S_IRUGO | S_IWUSR;
@@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = {
 };
 
 static const u32 nct7904_fan_config[] = {
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-            HWMON_F_INPUT,
-           0
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       0
 };
 
 static const struct hwmon_channel_info nct7904_fan = {
@@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = {
 };
 
 static const u32 nct7904_pwm_config[] = {
-            HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
-            HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
-            HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
-            HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
-           0
+       HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+       HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+       HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+       HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+       0
 };
 
 static const struct hwmon_channel_info nct7904_pwm = {
@@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = {
 };
 
 static const u32 nct7904_temp_config[] = {
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-            HWMON_T_INPUT,
-           0
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       HWMON_T_INPUT,
+       0
 };
 
 static const struct hwmon_channel_info nct7904_temp = {
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
new file mode 100644 (file)
index 0000000..8474d60
--- /dev/null
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+/* NPCM7XX PWM registers */
+#define NPCM7XX_PWM_REG_BASE(base, n)    ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_PWM_REG_PR(base, n)    (NPCM7XX_PWM_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_PWM_REG_CSR(base, n)   (NPCM7XX_PWM_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_PWM_REG_CR(base, n)    (NPCM7XX_PWM_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \
+                       (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch)))
+#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \
+                       (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \
+                       (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PIER(base, n)  (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C)
+#define NPCM7XX_PWM_REG_PIIR(base, n)  (NPCM7XX_PWM_REG_BASE(base, n) + 0x40)
+
+#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT          BIT(3)
+#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT          BIT(11)
+#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT          BIT(15)
+#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT          BIT(19)
+
+#define NPCM7XX_PWM_CTRL_CH0_INV_BIT           BIT(2)
+#define NPCM7XX_PWM_CTRL_CH1_INV_BIT           BIT(10)
+#define NPCM7XX_PWM_CTRL_CH2_INV_BIT           BIT(14)
+#define NPCM7XX_PWM_CTRL_CH3_INV_BIT           BIT(18)
+
+#define NPCM7XX_PWM_CTRL_CH0_EN_BIT            BIT(0)
+#define NPCM7XX_PWM_CTRL_CH1_EN_BIT            BIT(8)
+#define NPCM7XX_PWM_CTRL_CH2_EN_BIT            BIT(12)
+#define NPCM7XX_PWM_CTRL_CH3_EN_BIT            BIT(16)
+
+/* Define the maximum PWM channel number */
+#define NPCM7XX_PWM_MAX_CHN_NUM                        8
+#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE    4
+#define NPCM7XX_PWM_MAX_MODULES                 2
+
+/* Define the Counter Register, value = 100 to match 100% */
+#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM                255
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM            127
+#define NPCM7XX_PWM_CMR_MAX                    255
+
+/* Default PRESCALE2 = 1 for all PWM channels */
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0      0x4
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1      0x40
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2      0x400
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3      0x4000
+
+#define PWM_OUTPUT_FREQ_25KHZ                  25000
+#define PWN_CNT_DEFAULT                                256
+#define MIN_PRESCALE1                          2
+#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01                8
+
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT  (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \
+                                       NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \
+                                       NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \
+                                       NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3)
+
+#define NPCM7XX_PWM_CTRL_MODE_DEFAULT  (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \
+                                       NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \
+                                       NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \
+                                       NPCM7XX_PWM_CTRL_CH3_MODE_BIT)
+
+/* NPCM7XX FAN Tacho registers */
+#define NPCM7XX_FAN_REG_BASE(base, n)  ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_FAN_REG_TCNT1(base, n)    (NPCM7XX_FAN_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_FAN_REG_TCRA(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x02)
+#define NPCM7XX_FAN_REG_TCRB(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_FAN_REG_TCNT2(base, n)    (NPCM7XX_FAN_REG_BASE(base, n) + 0x06)
+#define NPCM7XX_FAN_REG_TPRSC(base, n)    (NPCM7XX_FAN_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_FAN_REG_TCKC(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A)
+#define NPCM7XX_FAN_REG_TMCTRL(base, n)   (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C)
+#define NPCM7XX_FAN_REG_TICTRL(base, n)   (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E)
+#define NPCM7XX_FAN_REG_TICLR(base, n)    (NPCM7XX_FAN_REG_BASE(base, n) + 0x10)
+#define NPCM7XX_FAN_REG_TIEN(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x12)
+#define NPCM7XX_FAN_REG_TCPA(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x14)
+#define NPCM7XX_FAN_REG_TCPB(base, n)     (NPCM7XX_FAN_REG_BASE(base, n) + 0x16)
+#define NPCM7XX_FAN_REG_TCPCFG(base, n)   (NPCM7XX_FAN_REG_BASE(base, n) + 0x18)
+#define NPCM7XX_FAN_REG_TINASEL(base, n)  (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A)
+#define NPCM7XX_FAN_REG_TINBSEL(base, n)  (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C)
+
+#define NPCM7XX_FAN_TCKC_CLKX_NONE     0
+#define NPCM7XX_FAN_TCKC_CLK1_APB      BIT(0)
+#define NPCM7XX_FAN_TCKC_CLK2_APB      BIT(3)
+
+#define NPCM7XX_FAN_TMCTRL_TBEN                BIT(6)
+#define NPCM7XX_FAN_TMCTRL_TAEN                BIT(5)
+#define NPCM7XX_FAN_TMCTRL_TBEDG       BIT(4)
+#define NPCM7XX_FAN_TMCTRL_TAEDG       BIT(3)
+#define NPCM7XX_FAN_TMCTRL_MODE_5      BIT(2)
+
+#define NPCM7XX_FAN_TICLR_CLEAR_ALL    GENMASK(5, 0)
+#define NPCM7XX_FAN_TICLR_TFCLR                BIT(5)
+#define NPCM7XX_FAN_TICLR_TECLR                BIT(4)
+#define NPCM7XX_FAN_TICLR_TDCLR                BIT(3)
+#define NPCM7XX_FAN_TICLR_TCCLR                BIT(2)
+#define NPCM7XX_FAN_TICLR_TBCLR                BIT(1)
+#define NPCM7XX_FAN_TICLR_TACLR                BIT(0)
+
+#define NPCM7XX_FAN_TIEN_ENABLE_ALL    GENMASK(5, 0)
+#define NPCM7XX_FAN_TIEN_TFIEN         BIT(5)
+#define NPCM7XX_FAN_TIEN_TEIEN         BIT(4)
+#define NPCM7XX_FAN_TIEN_TDIEN         BIT(3)
+#define NPCM7XX_FAN_TIEN_TCIEN         BIT(2)
+#define NPCM7XX_FAN_TIEN_TBIEN         BIT(1)
+#define NPCM7XX_FAN_TIEN_TAIEN         BIT(0)
+
+#define NPCM7XX_FAN_TICTRL_TFPND       BIT(5)
+#define NPCM7XX_FAN_TICTRL_TEPND       BIT(4)
+#define NPCM7XX_FAN_TICTRL_TDPND       BIT(3)
+#define NPCM7XX_FAN_TICTRL_TCPND       BIT(2)
+#define NPCM7XX_FAN_TICTRL_TBPND       BIT(1)
+#define NPCM7XX_FAN_TICTRL_TAPND       BIT(0)
+
+#define NPCM7XX_FAN_TCPCFG_HIBEN       BIT(7)
+#define NPCM7XX_FAN_TCPCFG_EQBEN       BIT(6)
+#define NPCM7XX_FAN_TCPCFG_LOBEN       BIT(5)
+#define NPCM7XX_FAN_TCPCFG_CPBSEL      BIT(4)
+#define NPCM7XX_FAN_TCPCFG_HIAEN       BIT(3)
+#define NPCM7XX_FAN_TCPCFG_EQAEN       BIT(2)
+#define NPCM7XX_FAN_TCPCFG_LOAEN       BIT(1)
+#define NPCM7XX_FAN_TCPCFG_CPASEL      BIT(0)
+
+/* FAN General Definition */
+/* Define the maximum FAN channel number */
+#define NPCM7XX_FAN_MAX_MODULE                 8
+#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE    2
+#define NPCM7XX_FAN_MAX_CHN_NUM                        16
+
+/*
+ * Fan tach timeout (based on a 214843.75Hz clock, 1 cnt = 4.654us):
+ * timeout of ~94ms ~= 0x5000 counts.
+ * (The minimum supported fan speed is ~640RPM at 1 pulse/rev,
+ * 320RPM at 2 pulses/rev, ... -- 10.6Hz)
+ */
+#define NPCM7XX_FAN_TIMEOUT    0x5000
+#define NPCM7XX_FAN_TCNT       0xFFFF
+#define NPCM7XX_FAN_TCPA       (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+#define NPCM7XX_FAN_TCPB       (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
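+
+/*
+ * Worked example with the values above: at 214843.75Hz one count lasts
+ * ~4.654us, so the 0x5000 (20480) count timeout corresponds to
+ * 20480 * 4.654us ~= 95ms.
+ */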
+
+#define NPCM7XX_FAN_POLL_TIMER_200MS                   200
+#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION       2
+#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT              0
+#define NPCM7XX_FAN_CLK_PRESCALE                       255
+
+#define NPCM7XX_FAN_CMPA                               0
+#define NPCM7XX_FAN_CMPB                               1
+
+/* Obtain the fan number */
+#define NPCM7XX_FAN_INPUT(fan, cmp)            (((fan) << 1) + (cmp))
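+/* e.g. fan module 3, comparator B (cmp = 1): (3 << 1) + 1 = channel 7 */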
+
+/* fan sample status */
+#define FAN_DISABLE                            0xFF
+#define FAN_INIT                               0x00
+#define FAN_PREPARE_TO_GET_FIRST_CAPTURE       0x01
+#define FAN_ENOUGH_SAMPLE                      0x02
+
+struct npcm7xx_fan_dev {
+       u8 fan_st_flg;
+       u8 fan_pls_per_rev;
+       u16 fan_cnt;
+       u32 fan_cnt_tmp;
+};
+
+struct npcm7xx_cooling_device {
+       char name[THERMAL_NAME_LENGTH];
+       struct npcm7xx_pwm_fan_data *data;
+       struct thermal_cooling_device *tcdev;
+       int pwm_port;
+       u8 *cooling_levels;
+       u8 max_state;
+       u8 cur_state;
+};
+
+struct npcm7xx_pwm_fan_data {
+       void __iomem *pwm_base;
+       void __iomem *fan_base;
+       unsigned long pwm_clk_freq;
+       unsigned long fan_clk_freq;
+       struct clk *pwm_clk;
+       struct clk *fan_clk;
+       struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
+       spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
+       int fan_irq[NPCM7XX_FAN_MAX_MODULE];
+       bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
+       bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM];
+       u32 input_clk_freq;
+       struct timer_list fan_timer;
+       struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
+       struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+       u8 fan_select;
+};
+
+static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
+                                 int channel, u16 val)
+{
+       u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+       u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+       u32 tmp_buf, ctrl_en_bit, env_bit;
+
+       /*
+        * Configure the PWM Comparator register to set the duty cycle
+        */
+       mutex_lock(&data->pwm_lock[module]);
+
+       /* write the new CMR value */
+       iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+       tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+
+       switch (pwm_ch) {
+       case 0:
+               ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT;
+               env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT;
+               break;
+       case 1:
+               ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT;
+               env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT;
+               break;
+       case 2:
+               ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT;
+               env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT;
+               break;
+       case 3:
+               ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT;
+               env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
+               break;
+       default:
+               mutex_unlock(&data->pwm_lock[module]);
+               return -ENODEV;
+       }
+
+       if (val == 0) {
+               /* Disable PWM */
+               tmp_buf &= ~ctrl_en_bit;
+               tmp_buf |= env_bit;
+       } else {
+               /* Enable PWM */
+               tmp_buf |= ctrl_en_bit;
+               tmp_buf &= ~env_bit;
+       }
+
+       iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+       mutex_unlock(&data->pwm_lock[module]);
+
+       return 0;
+}
+
+static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data,
+                                            u8 fan, u8 cmp)
+{
+       u8 fan_id;
+       u8 reg_mode;
+       u8 reg_int;
+       unsigned long flags;
+
+       fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+       /* check whether this fan tach is enabled */
+       if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) {
+               /* reset status */
+               spin_lock_irqsave(&data->fan_lock[fan], flags);
+
+               data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+               reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+               /*
+                * The interrupt enable bits do not need to be cleared before
+                * being set; they are cleared only on reset. The clock unit
+                * control register behaves in the same manner as the
+                * interrupt enable register.
+                */
+               if (cmp == NPCM7XX_FAN_CMPA) {
+                       /* enable interrupt */
+                       iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN |
+                                           NPCM7XX_FAN_TIEN_TEIEN),
+                                NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+                       reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB
+                               | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+                                                              fan));
+
+                       /* start to Capture */
+                       iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base,
+                                                               fan));
+               } else {
+                       /* enable interrupt */
+                       iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN |
+                                           NPCM7XX_FAN_TIEN_TFIEN),
+                                NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+                       reg_mode =
+                               NPCM7XX_FAN_TCKC_CLK2_APB
+                               | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+                                                              fan));
+
+                       /* start to Capture */
+                       iowrite8(reg_mode,
+                                NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+               }
+
+               spin_unlock_irqrestore(&data->fan_lock[fan], flags);
+       }
+}
+
+/*
+ * Background timer callback polling the fan tach values; one full pass
+ * over all fans takes four rounds (200ms * 4).
+ */
+static void npcm7xx_fan_polling(struct timer_list *t)
+{
+       struct npcm7xx_pwm_fan_data *data;
+       int i;
+
+       data = from_timer(data, t, fan_timer);
+
+       /*
+        * Poll two modules per round:
+        * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415
+        */
+       for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE;
+             i = i + 4) {
+               /* clear the flag and reset the counter (TCNT) */
+               iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+                        NPCM7XX_FAN_REG_TICLR(data->fan_base, i));
+
+               if (data->fan_present[i * 2]) {
+                       iowrite16(NPCM7XX_FAN_TCNT,
+                                 NPCM7XX_FAN_REG_TCNT1(data->fan_base, i));
+                       npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA);
+               }
+               if (data->fan_present[(i * 2) + 1]) {
+                       iowrite16(NPCM7XX_FAN_TCNT,
+                                 NPCM7XX_FAN_REG_TCNT2(data->fan_base, i));
+                       npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB);
+               }
+       }
+
+       data->fan_select++;
+       data->fan_select &= 0x3;
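+       /*
+        * fan_select cycles through 0..3; combined with the step of four
+        * in the loop above, four timer rounds cover all eight modules:
+        * {0,4}, {1,5}, {2,6}, {3,7}.
+        */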
+
+       /* reset the timer interval */
+       data->fan_timer.expires = jiffies +
+               msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+       add_timer(&data->fan_timer);
+}
+
+static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data,
+                                      u8 fan, u8 cmp, u8 fan_id, u8 flag_int,
+                                      u8 flag_mode, u8 flag_clear)
+{
+       u8  reg_int;
+       u8  reg_mode;
+       u16 fan_cap;
+
+       if (cmp == NPCM7XX_FAN_CMPA)
+               fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan));
+       else
+               fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan));
+
+       /* clear capture flag, H/W will auto reset NPCM7XX_FAN_TCNTx */
+       iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+       if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) {
+               /* First capture, drop it */
+               data->fan_dev[fan_id].fan_st_flg =
+                       FAN_PREPARE_TO_GET_FIRST_CAPTURE;
+
+               /* reset counter */
+               data->fan_dev[fan_id].fan_cnt_tmp = 0;
+       } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) {
+               /*
+                * collect enough samples
+                * (e.g. a 2-pulse fan needs 2 samples)
+                */
+               data->fan_dev[fan_id].fan_cnt_tmp +=
+                       (NPCM7XX_FAN_TCNT - fan_cap);
+
+               data->fan_dev[fan_id].fan_st_flg++;
+       } else {
+               /* got enough samples, or the fan is disabled */
+               if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) {
+                       data->fan_dev[fan_id].fan_cnt_tmp +=
+                               (NPCM7XX_FAN_TCNT - fan_cap);
+
+                       /* compute the final average count per pulse */
+                       data->fan_dev[fan_id].fan_cnt =
+                               data->fan_dev[fan_id].fan_cnt_tmp /
+                               FAN_ENOUGH_SAMPLE;
+
+                       data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+               }
+
+               reg_int =  ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+               /* disable interrupt */
+               iowrite8((reg_int & ~flag_int),
+                        NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+               reg_mode =  ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+               /* stop capturing */
+               iowrite8((reg_mode & ~flag_mode),
+                        NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+       }
+}
+
+static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data,
+                                    u8 fan, u8 cmp, u8 flag)
+{
+       u8 reg_int;
+       u8 reg_mode;
+       u8 flag_timeout;
+       u8 flag_cap;
+       u8 flag_clear;
+       u8 flag_int;
+       u8 flag_mode;
+       u8 fan_id;
+
+       fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+       if (cmp == NPCM7XX_FAN_CMPA) {
+               flag_cap = NPCM7XX_FAN_TICTRL_TAPND;
+               flag_timeout = NPCM7XX_FAN_TICTRL_TEPND;
+               flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN;
+               flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB;
+               flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR;
+       } else {
+               flag_cap = NPCM7XX_FAN_TICTRL_TBPND;
+               flag_timeout = NPCM7XX_FAN_TICTRL_TFPND;
+               flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN;
+               flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB;
+               flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR;
+       }
+
+       if (flag & flag_timeout) {
+               reg_int =  ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+               /* disable interrupt */
+               iowrite8((reg_int & ~flag_int),
+                        NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+               /* clear interrupt flag */
+               iowrite8(flag_clear,
+                        NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+               reg_mode =  ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+               /* stop capturing */
+               iowrite8((reg_mode & ~flag_mode),
+                        NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+               /*
+                * If a timeout occurs (NPCM7XX_FAN_TIMEOUT), either the fan
+                * is not connected or its speed is below 10.6Hz (320RPM at
+                * 2 pulses/rev). In either case, the RPM output should be
+                * zero.
+                */
+               data->fan_dev[fan_id].fan_cnt = 0;
+       } else {
+               /* an input capture occurred */
+               if (flag & flag_cap)
+                       npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int,
+                                           flag_mode, flag_clear);
+       }
+}
+
+static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id)
+{
+       struct npcm7xx_pwm_fan_data *data = dev_id;
+       unsigned long flags;
+       int module;
+       u8 flag;
+
+       module = irq - data->fan_irq[0];
+       spin_lock_irqsave(&data->fan_lock[module], flags);
+
+       flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module));
+       if (flag > 0) {
+               npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag);
+               npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag);
+               spin_unlock_irqrestore(&data->fan_lock[module], flags);
+               return IRQ_HANDLED;
+       }
+
+       spin_unlock_irqrestore(&data->fan_lock[module], flags);
+
+       return IRQ_NONE;
+}
+
+static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel,
+                           long *val)
+{
+       struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+       u32 pmw_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+       u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+
+       switch (attr) {
+       case hwmon_pwm_input:
+               *val = ioread32
+                       (NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pmw_ch));
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel,
+                            long val)
+{
+       struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+       int err;
+
+       switch (attr) {
+       case hwmon_pwm_input:
+               if (val < 0 || val > NPCM7XX_PWM_CMR_MAX)
+                       return -EINVAL;
+               err = npcm7xx_pwm_config_set(data, channel, (u16)val);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+       const struct npcm7xx_pwm_fan_data *data = _data;
+
+       if (!data->pwm_present[channel])
+               return 0;
+
+       switch (attr) {
+       case hwmon_pwm_input:
+               return 0644;
+       default:
+               return 0;
+       }
+}
+
+static int npcm7xx_read_fan(struct device *dev, u32 attr, int channel,
+                           long *val)
+{
+       struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+
+       switch (attr) {
+       case hwmon_fan_input:
+               *val = 0;
+               if (data->fan_dev[channel].fan_cnt <= 0)
+                       return data->fan_dev[channel].fan_cnt;
+
+               /* Convert the raw reading to RPM */
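+               /*
+                * For example, at the 214843.75Hz tach clock a fan with
+                * 2 pulses/rev and fan_cnt = 1000 reads
+                * (214843.75 * 60) / (1000 * 2) ~= 6445 RPM.
+                */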
+               if (data->fan_dev[channel].fan_cnt > 0 &&
+                   data->fan_dev[channel].fan_pls_per_rev > 0)
+                       *val = ((data->input_clk_freq * 60) /
+                               (data->fan_dev[channel].fan_cnt *
+                                data->fan_dev[channel].fan_pls_per_rev));
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+       const struct npcm7xx_pwm_fan_data *data = _data;
+
+       if (!data->fan_present[channel])
+               return 0;
+
+       switch (attr) {
+       case hwmon_fan_input:
+               return 0444;
+       default:
+               return 0;
+       }
+}
+
+static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       switch (type) {
+       case hwmon_pwm:
+               return npcm7xx_read_pwm(dev, attr, channel, val);
+       case hwmon_fan:
+               return npcm7xx_read_fan(dev, attr, channel, val);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long val)
+{
+       switch (type) {
+       case hwmon_pwm:
+               return npcm7xx_write_pwm(dev, attr, channel, val);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static umode_t npcm7xx_is_visible(const void *data,
+                                 enum hwmon_sensor_types type,
+                                 u32 attr, int channel)
+{
+       switch (type) {
+       case hwmon_pwm:
+               return npcm7xx_pwm_is_visible(data, attr, channel);
+       case hwmon_fan:
+               return npcm7xx_fan_is_visible(data, attr, channel);
+       default:
+               return 0;
+       }
+}
+
+static const u32 npcm7xx_pwm_config[] = {
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       HWMON_PWM_INPUT,
+       0
+};
+
+static const struct hwmon_channel_info npcm7xx_pwm = {
+       .type = hwmon_pwm,
+       .config = npcm7xx_pwm_config,
+};
+
+static const u32 npcm7xx_fan_config[] = {
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       HWMON_F_INPUT,
+       0
+};
+
+static const struct hwmon_channel_info npcm7xx_fan = {
+       .type = hwmon_fan,
+       .config = npcm7xx_fan_config,
+};
+
+static const struct hwmon_channel_info *npcm7xx_info[] = {
+       &npcm7xx_pwm,
+       &npcm7xx_fan,
+       NULL
+};
+
+static const struct hwmon_ops npcm7xx_hwmon_ops = {
+       .is_visible = npcm7xx_is_visible,
+       .read = npcm7xx_read,
+       .write = npcm7xx_write,
+};
+
+static const struct hwmon_chip_info npcm7xx_chip_info = {
+       .ops = &npcm7xx_hwmon_ops,
+       .info = npcm7xx_info,
+};
+
+static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
+{
+       int m, ch;
+       u32 prescale_val, output_freq;
+
+       data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+
+       /* Adjust the NPCM7xx PWM output frequency to ~25kHz */
+       output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT;
+       prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
+
+       /* If prescale_val = 0, then the prescale output clock is stopped */
+       if (prescale_val < MIN_PRESCALE1)
+               prescale_val = MIN_PRESCALE1;
+       /*
+        * prescale_val needs to be decremented by one because the hardware
+        * increments the value in the PWM Prescale register by one
+        */
+       prescale_val--;
+
+       /* Set the PWM Prescale Register value for both modules */
+       prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
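+
+       /*
+        * Worked example under an assumed 25MHz PWM input clock:
+        * output_freq = 25000000 / 256 ~= 97656Hz,
+        * prescale_val = DIV_ROUND_CLOSEST(97656, 25000) = 4, stored as 3
+        * (the hardware adds one back), giving 97656 / 4 ~= 24.4kHz at
+        * the pin -- close to the 25kHz target.
+        */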
+
+       for (m = 0; m < NPCM7XX_PWM_MAX_MODULES; m++) {
+               iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
+               iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
+                         NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
+               iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT,
+                         NPCM7XX_PWM_REG_CR(data->pwm_base, m));
+
+               for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) {
+                       iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM,
+                                 NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch));
+               }
+       }
+
+       return output_freq / ((prescale_val & 0xf) + 1);
+}
+
+static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data)
+{
+       int md;
+       int ch;
+       int i;
+       u32 apb_clk_freq;
+
+       for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
+               /* stop FAN0~7 clock */
+               iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE,
+                        NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
+
+               /* disable all interrupts */
+               iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
+
+               /* clear all interrupts */
+               iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+                        NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
+
+               /* set FAN0~7 clock prescaler */
+               iowrite8(NPCM7XX_FAN_CLK_PRESCALE,
+                        NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
+
+               /* set FAN0~7 mode (high-to-low transition) */
+               iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN |
+                         NPCM7XX_FAN_TMCTRL_TAEN),
+                        NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
+
+               /* set FAN0~7 Initial Count/Cap */
+               iowrite16(NPCM7XX_FAN_TCNT,
+                         NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
+               iowrite16(NPCM7XX_FAN_TCNT,
+                         NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
+
+               /* set FAN0~7 compare (equal to count) */
+               iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN),
+                        NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
+
+               /* set FAN0~7 compare value */
+               iowrite16(NPCM7XX_FAN_TCPA,
+                         NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
+               iowrite16(NPCM7XX_FAN_TCPB,
+                         NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
+
+               /* set FAN0~7 fan input FANIN 0~15 */
+               iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+                        NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
+               iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+                        NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
+
+               for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) {
+                       ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
+                       data->fan_dev[ch].fan_st_flg = FAN_DISABLE;
+                       data->fan_dev[ch].fan_pls_per_rev =
+                               NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION;
+                       data->fan_dev[ch].fan_cnt = 0;
+               }
+       }
+
+       apb_clk_freq = clk_get_rate(data->fan_clk);
+
+       /* Fan tach input clock = APB clock / prescaler, default is 255. */
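+       /*
+        * For example, with an assumed 55MHz APB clock (the real rate
+        * comes from the clock framework), 55000000 / 256 = 214843.75Hz,
+        * which is the tach clock quoted in the timeout comment above.
+        */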
+       data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1);
+}
+
+static int
+npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+                            unsigned long *state)
+{
+       struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+       *state = cdev->max_state;
+
+       return 0;
+}
+
+static int
+npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+                            unsigned long *state)
+{
+       struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+       *state = cdev->cur_state;
+
+       return 0;
+}
+
+static int
+npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+                            unsigned long state)
+{
+       struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+       int ret;
+
+       if (state > cdev->max_state)
+               return -EINVAL;
+
+       cdev->cur_state = state;
+       ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port,
+                                    cdev->cooling_levels[cdev->cur_state]);
+
+       return ret;
+}
+
+static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = {
+       .get_max_state = npcm7xx_pwm_cz_get_max_state,
+       .get_cur_state = npcm7xx_pwm_cz_get_cur_state,
+       .set_cur_state = npcm7xx_pwm_cz_set_cur_state,
+};
+
+static int npcm7xx_create_pwm_cooling(struct device *dev,
+                                     struct device_node *child,
+                                     struct npcm7xx_pwm_fan_data *data,
+                                     u32 pwm_port, u8 num_levels)
+{
+       int ret;
+       struct npcm7xx_cooling_device *cdev;
+
+       cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+       if (!cdev)
+               return -ENOMEM;
+
+       cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+       if (!cdev->cooling_levels)
+               return -ENOMEM;
+
+       cdev->max_state = num_levels - 1;
+       ret = of_property_read_u8_array(child, "cooling-levels",
+                                       cdev->cooling_levels,
+                                       num_levels);
+       if (ret) {
+               dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+               return ret;
+       }
+       snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+                pwm_port);
+
+       cdev->tcdev = thermal_of_cooling_device_register(child,
+                                                        cdev->name,
+                                                        cdev,
+                                                        &npcm7xx_pwm_cool_ops);
+       if (IS_ERR(cdev->tcdev))
+               return PTR_ERR(cdev->tcdev);
+
+       cdev->data = data;
+       cdev->pwm_port = pwm_port;
+
+       data->cdev[pwm_port] = cdev;
+
+       return 0;
+}
+
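+/*
+ * A typical child node in the device tree (a sketch; the exact binding
+ * is described in the nuvoton,npcm750-pwm-fan binding document) looks
+ * like:
+ *
+ *     fan@0 {
+ *             reg = <0x00>;
+ *             fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ *     };
+ *
+ * "reg" selects the PWM port and "fan-tach-ch" lists the tach channels
+ * bound to it; an optional "cooling-levels" array registers the port as
+ * a thermal cooling device.
+ */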
+static int npcm7xx_en_pwm_fan(struct device *dev,
+                             struct device_node *child,
+                             struct npcm7xx_pwm_fan_data *data)
+{
+       u8 *fan_ch;
+       u32 pwm_port;
+       int ret, fan_cnt;
+       u8 index, ch;
+
+       ret = of_property_read_u32(child, "reg", &pwm_port);
+       if (ret)
+               return ret;
+
+       data->pwm_present[pwm_port] = true;
+       ret = npcm7xx_pwm_config_set(data, pwm_port,
+                                    NPCM7XX_PWM_CMR_DEFAULT_NUM);
+
+       ret = of_property_count_u8_elems(child, "cooling-levels");
+       if (ret > 0) {
+               ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port,
+                                                ret);
+               if (ret)
+                       return ret;
+       }
+
+       fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch");
+       if (fan_cnt < 1)
+               return -EINVAL;
+
+       fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+       if (!fan_ch)
+               return -ENOMEM;
+
+       ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt);
+       if (ret)
+               return ret;
+
+       for (ch = 0; ch < fan_cnt; ch++) {
+               index = fan_ch[ch];
+               data->fan_present[index] = true;
+               data->fan_dev[index].fan_st_flg = FAN_INIT;
+       }
+
+       return 0;
+}
+
+static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np, *child;
+       struct npcm7xx_pwm_fan_data *data;
+       struct resource *res;
+       struct device *hwmon;
+       char name[20];
+       int ret, cnt;
+       u32 output_freq;
+       u32 i;
+
+       np = dev->of_node;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+       if (!res) {
+               dev_err(dev, "pwm resource not found\n");
+               return -ENODEV;
+       }
+
+       data->pwm_base = devm_ioremap_resource(dev, res);
+       dev_dbg(dev, "pwm base resource is %pR\n", res);
+       if (IS_ERR(data->pwm_base))
+               return PTR_ERR(data->pwm_base);
+
+       data->pwm_clk = devm_clk_get(dev, "pwm");
+       if (IS_ERR(data->pwm_clk)) {
+               dev_err(dev, "couldn't get pwm clock\n");
+               return PTR_ERR(data->pwm_clk);
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan");
+       if (!res) {
+               dev_err(dev, "fan resource not found\n");
+               return -ENODEV;
+       }
+
+       data->fan_base = devm_ioremap_resource(dev, res);
+       dev_dbg(dev, "fan base resource is %pR\n", res);
+       if (IS_ERR(data->fan_base))
+               return PTR_ERR(data->fan_base);
+
+       data->fan_clk = devm_clk_get(dev, "fan");
+       if (IS_ERR(data->fan_clk)) {
+               dev_err(dev, "couldn't get fan clock\n");
+               return PTR_ERR(data->fan_clk);
+       }
+
+       output_freq = npcm7xx_pwm_init(data);
+       npcm7xx_fan_init(data);
+
+       for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES; cnt++)
+               mutex_init(&data->pwm_lock[cnt]);
+
+       for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
+               spin_lock_init(&data->fan_lock[i]);
+
+               data->fan_irq[i] = platform_get_irq(pdev, i);
+               if (data->fan_irq[i] < 0) {
+                       dev_err(dev, "get IRQ fan%d failed\n", i);
+                       return data->fan_irq[i];
+               }
+
+               sprintf(name, "NPCM7XX-FAN-MD%d", i);
+               ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
+                                      0, name, (void *)data);
+               if (ret) {
+                       dev_err(dev, "register IRQ fan%d failed\n", i);
+                       return ret;
+               }
+       }
+
+       for_each_child_of_node(np, child) {
+               ret = npcm7xx_en_pwm_fan(dev, child, data);
+               if (ret) {
+                       dev_err(dev, "enable pwm and fan failed\n");
+                       of_node_put(child);
+                       return ret;
+               }
+       }
+
+       hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan",
+                                                    data, &npcm7xx_chip_info,
+                                                    NULL);
+       if (IS_ERR(hwmon)) {
+               dev_err(dev, "unable to register hwmon device\n");
+               return PTR_ERR(hwmon);
+       }
+
+       for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) {
+               if (data->fan_present[i]) {
+                       /* fan timer initialization */
+                       data->fan_timer.expires = jiffies +
+                               msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+                       timer_setup(&data->fan_timer,
+                                   npcm7xx_fan_polling, 0);
+                       add_timer(&data->fan_timer);
+                       break;
+               }
+       }
+
+       pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n",
+               output_freq, data->input_clk_freq);
+
+       return 0;
+}
+
+static const struct of_device_id of_pwm_fan_match_table[] = {
+       { .compatible = "nuvoton,npcm750-pwm-fan", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
+
+static struct platform_driver npcm7xx_pwm_fan_driver = {
+       .probe          = npcm7xx_pwm_fan_probe,
+       .driver         = {
+               .name   = "npcm7xx_pwm_fan",
+               .of_match_table = of_pwm_fan_match_table,
+       },
+};
+
+module_platform_driver(npcm7xx_pwm_fan_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
index e71aec69e76ef366330f4d147984e8e65072ec29..a82018aaf4736582ec62ba329a80cefeae13b153 100644 (file)
@@ -130,7 +130,7 @@ config SENSORS_MAX34440
        default n
        help
          If you say yes here you get hardware monitoring support for Maxim
-         MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
+         MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
 
          This driver can also be built as a module. If so, the module will
          be called max34440.
index 74a1f6f68fb384fc02c530aa27bce7f243107969..47576c4600105cf0d9de5424548ef9495c2b0a60 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { max34440, max34441, max34446, max34460, max34461 };
+enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
 
 #define MAX34440_MFR_VOUT_PEAK         0xd4
 #define MAX34440_MFR_IOUT_PEAK         0xd5
@@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
 #define MAX34440_STATUS_OT_FAULT       BIT(5)
 #define MAX34440_STATUS_OT_WARN                BIT(6)
 
+#define MAX34451_MFR_CHANNEL_CONFIG    0xe4
+#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK   0x3f
+
 struct max34440_data {
        int id;
        struct pmbus_driver_info info;
@@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
                                           MAX34440_MFR_VOUT_PEAK);
                break;
        case PMBUS_VIRT_READ_IOUT_AVG:
-               if (data->id != max34446)
+               if (data->id != max34446 && data->id != max34451)
                        return -ENXIO;
                ret = pmbus_read_word_data(client, page,
                                           MAX34446_MFR_IOUT_AVG);
@@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
        case PMBUS_VIRT_RESET_IOUT_HISTORY:
                ret = pmbus_write_word_data(client, page,
                                            MAX34440_MFR_IOUT_PEAK, 0);
-               if (!ret && data->id == max34446)
+               if (!ret && (data->id == max34446 || data->id == max34451))
                        ret = pmbus_write_word_data(client, page,
                                        MAX34446_MFR_IOUT_AVG, 0);
 
@@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
        return ret;
 }
 
+static int max34451_set_supported_funcs(struct i2c_client *client,
+                                        struct max34440_data *data)
+{
+       /*
+        * Each of the channels 0-15 can be configured to monitor one of the
+        * following functions based on MFR_CHANNEL_CONFIG[5:0]:
+        * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0-11)
+        * 0x20: Voltage monitoring (no sequencing)
+        * 0x21: Voltage read only
+        * 0x22: Current monitoring
+        * 0x23: Current read only
+        * 0x30: General-purpose input, active low
+        * 0x34: General-purpose input, active high
+        * 0x00: Disabled
+        */
+
+       int page, rv;
+
+       for (page = 0; page < 16; page++) {
+               rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+               if (rv < 0)
+                       return rv;
+
+               rv = i2c_smbus_read_word_data(client,
+                                             MAX34451_MFR_CHANNEL_CONFIG);
+               if (rv < 0)
+                       return rv;
+
+               switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) {
+               case 0x10:
+               case 0x20:
+                       data->info.func[page] = PMBUS_HAVE_VOUT |
+                               PMBUS_HAVE_STATUS_VOUT;
+                       break;
+               case 0x21:
+                       data->info.func[page] = PMBUS_HAVE_VOUT;
+                       break;
+               case 0x22:
+                       data->info.func[page] = PMBUS_HAVE_IOUT |
+                               PMBUS_HAVE_STATUS_IOUT;
+                       break;
+               case 0x23:
+                       data->info.func[page] = PMBUS_HAVE_IOUT;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
 static struct pmbus_driver_info max34440_info[] = {
        [max34440] = {
                .pages = 14,
@@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = {
                .read_word_data = max34440_read_word_data,
                .write_word_data = max34440_write_word_data,
        },
+       [max34451] = {
+               .pages = 21,
+               .format[PSC_VOLTAGE_OUT] = direct,
+               .format[PSC_TEMPERATURE] = direct,
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_VOLTAGE_OUT] = 1,
+               .b[PSC_VOLTAGE_OUT] = 0,
+               .R[PSC_VOLTAGE_OUT] = 3,
+               .m[PSC_CURRENT_OUT] = 1,
+               .b[PSC_CURRENT_OUT] = 0,
+               .R[PSC_CURRENT_OUT] = 2,
+               .m[PSC_TEMPERATURE] = 1,
+               .b[PSC_TEMPERATURE] = 0,
+               .R[PSC_TEMPERATURE] = 2,
+               /* funcs 0-15 are set dynamically before probing */
+               .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .read_byte_data = max34440_read_byte_data,
+               .read_word_data = max34440_read_word_data,
+               .write_word_data = max34440_write_word_data,
+       },
        [max34460] = {
                .pages = 18,
                .format[PSC_VOLTAGE_OUT] = direct,
@@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client,
                          const struct i2c_device_id *id)
 {
        struct max34440_data *data;
+       int rv;
 
        data = devm_kzalloc(&client->dev, sizeof(struct max34440_data),
                            GFP_KERNEL);
@@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client,
        data->id = id->driver_data;
        data->info = max34440_info[id->driver_data];
 
+       if (data->id == max34451) {
+               rv = max34451_set_supported_funcs(client, data);
+               if (rv)
+                       return rv;
+       }
+
        return pmbus_do_probe(client, id, &data->info);
 }
 
@@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = {
        {"max34440", max34440},
        {"max34441", max34441},
        {"max34446", max34446},
+       {"max34451", max34451},
        {"max34460", max34460},
        {"max34461", max34461},
        {}
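
The max34451 hunks above derive each page's PMBus feature flags from the low six bits of MFR_CHANNEL_CONFIG. A standalone sketch of that decode, with the register value passed in instead of read over SMBus (the helper name is hypothetical; the flag constants are the pmbus.h ones used above):

    static u32 max34451_funcs_for_config(u16 config)
    {
            switch (config & 0x3f) {        /* ..._CONFIG_SEL_MASK */
            case 0x10:                      /* sequencing + voltage monitoring */
            case 0x20:                      /* voltage monitoring */
                    return PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
            case 0x21:                      /* voltage read only */
                    return PMBUS_HAVE_VOUT;
            case 0x22:                      /* current monitoring */
                    return PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
            case 0x23:                      /* current read only */
                    return PMBUS_HAVE_IOUT;
            default:                        /* GPI modes and disabled channels */
                    return 0;
            }
    }
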
index 4a34f311e1ff4df2cd6cdfcff7ea1662c95a06eb..6ec65adaba49569ab7b9775f856859a0fcfbd967 100644 (file)
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        if (bit_adap->getscl == NULL)
                adap->quirks = &i2c_bit_quirk_no_clk_stretch;
 
-       /* Bring bus to a known state. Looks like STOP if bus is not free yet */
-       setscl(bit_adap, 1);
-       udelay(bit_adap->udelay);
-       setsda(bit_adap, 1);
+       /*
+        * We tried forcing SCL/SDA to an initial state here. But that caused a
+        * regression, sadly. Check Bugzilla #200045 for details.
+        */
 
        ret = add_adapter(adap);
        if (ret < 0)
index 44cffad43701f4839096bbde5c5937ee22cce135..c4d176f5ed793c76c78c412d081c21bc8dff2327 100644 (file)
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
        .name                   = "cht_wc_ext_chrg_irq_chip",
 };
 
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+       "tcpm-source-psy-i2c-fusb302" };
 
 static const struct property_entry bq24190_props[] = {
        PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
index 75d6ab177055efa2119635eb56845388ac3a0848..7379043711dfa89455abd65bce74ac08707073f9 100644 (file)
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
        /*
         * It's not always possible to have a 1:2 ratio when d=7, so fall back
         * to the minimal possible clkh in this case.
+        *
+        * Note:
+        * CLKH must not be 0; if it were, the I2C clock would not be
+        * generated at all.
         */
-       if (clk >= clkl + d) {
+       if (clk > clkl + d) {
                clkh = clk - clkl - d;
                clkl -= d;
        } else {
-               clkh = 0;
+               clkh = 1;
                clkl = clk - (d << 1);
        }
 
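
The comparison change above matters exactly at the boundary clk == clkl + d: the old ">=" test took the first branch and computed clkh = clk - clkl - d = 0, which stops SCL generation entirely, while the strict ">" routes that case to the fallback, which pins clkh to 1. A sketch of the fixed split, assuming the same clk/clkl/d roles as in i2c_davinci_calc_clk_dividers():

    static void split_clk(unsigned int clk, unsigned int clkl, unsigned int d,
                          unsigned int *high, unsigned int *low)
    {
            if (clk > clkl + d) {
                    *high = clk - clkl - d; /* now guaranteed >= 1 */
                    *low = clkl - d;
            } else {
                    *high = 1;              /* CLKH == 0 would stop the clock */
                    *low = clk - (d << 1);
            }
    }
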
index 005e6e0330c278276a0d602fcfebdc3429218cfd..66f85bbf35917161cc36e4ffb308d78b8401c0cb 100644 (file)
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
         * required for an I2C bus.
         */
        if (pdata->scl_is_open_drain)
-               gflags = GPIOD_OUT_LOW;
+               gflags = GPIOD_OUT_HIGH;
        else
-               gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+               gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
        priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
        if (IS_ERR(priv->scl))
                return PTR_ERR(priv->scl);
index 0207e194f84bb4e667d4ebec548987f40caffdd1..498c5e89164988beb8d82e5f4bc53aac82b137ac 100644 (file)
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
                goto err_desc;
        }
 
+       reinit_completion(&dma->cmd_complete);
        txdesc->callback = i2c_imx_dma_callback;
        txdesc->callback_param = i2c_imx;
        if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
         * The first byte must be transmitted by the CPU.
         */
        imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        if (result)
                return result;
 
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
                        "gpio");
        rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
        if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
            PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
index 5e310efd94464897d9db7becf89294a8b2adc4f6..3c1c817f6968e43fdafd73ff6f53d381c2528b96 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
 #define ID_ARBLOST     (1 << 3)
 #define ID_NACK                (1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA  (1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED        (1 << 31)
-#define ID_P_MASK      ID_P_PM_BLOCKED
+#define ID_P_MASK      (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
        I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
        struct dma_chan *dma_rx;
        struct scatterlist sg;
        enum dma_data_direction dma_direction;
+
+       struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)                ((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
        dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
                         sg_dma_len(&priv->sg), priv->dma_direction);
 
+       /* Gen3 can only do one RXDMA per transfer and we just completed it */
+       if (priv->devtype == I2C_RCAR_GEN3 &&
+           priv->dma_direction == DMA_FROM_DEVICE)
+               priv->flags |= ID_P_NO_RXDMA;
+
        priv->dma_direction = DMA_NONE;
 }
 
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        unsigned char *buf;
        int len;
 
-       /* Do not use DMA if it's not available or for messages < 8 bytes */
-       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+       /* Do various checks to see if DMA is feasible at all */
+       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+           (read && priv->flags & ID_P_NO_RXDMA))
                return;
 
        if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
        }
 }
 
+/* I2C is a special case; we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+       int i, ret;
+
+       ret = reset_control_reset(priv->rstc);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < LOOP_TIMEOUT; i++) {
+               ret = reset_control_status(priv->rstc);
+               if (ret == 0)
+                       return 0;
+               udelay(1);
+       }
+
+       return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                                struct i2c_msg *msgs,
                                int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_get_sync(dev);
 
+       /* Gen3 needs a reset before allowing RXDMA once */
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->flags |= ID_P_NO_RXDMA;
+               if (!IS_ERR(priv->rstc)) {
+                       ret = rcar_i2c_do_reset(priv);
+                       if (ret == 0)
+                               priv->flags &= ~ID_P_NO_RXDMA;
+               }
+       }
+
        rcar_i2c_init(priv);
 
        ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out_pm_put;
 
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+               if (!IS_ERR(priv->rstc)) {
+                       ret = reset_control_status(priv->rstc);
+                       if (ret < 0)
+                               priv->rstc = ERR_PTR(-ENOTSUPP);
+               }
+       }
+
        /* Stay always active when multi-master to keep arbitration working */
        if (of_property_read_bool(dev->of_node, "multi-master"))
                priv->flags |= ID_P_PM_BLOCKED;
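
On Gen3 the patch only re-enables RXDMA after a successful reset, and that reset has to be polled to completion by hand. The poll-until-deasserted idiom from rcar_i2c_do_reset(), sketched with generic names (reset_and_wait and the loop count are ours; the 1 us step mirrors the patch):

    #include <linux/delay.h>
    #include <linux/reset.h>

    static int reset_and_wait(struct reset_control *rstc, unsigned int loops)
    {
            int ret = reset_control_reset(rstc);

            if (ret)
                    return ret;

            while (loops--) {
                    /* 0 == deasserted, i.e. the reset has completed */
                    ret = reset_control_status(rstc);
                    if (ret == 0)
                            return 0;
                    udelay(1);
            }
            return -ETIMEDOUT;
    }
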
index e866c481bfc325d3c42e733faa88d133b3388f0d..fce52bdab2b715a7123e34b153e2206662c67bf9 100644 (file)
@@ -127,7 +127,7 @@ enum stu300_error {
 
 /*
  * The number of address send attempts made before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
  */
 #define NUM_ADDR_RESEND_ATTEMPTS 12
 
index 5fccd1f1bca85d28bcc249fa6b76f4297bf504bb..797def5319f1325adacf1974c0b44cdb3a7ca4a6 100644 (file)
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
 {
        u32 cnfg;
 
+       /*
+        * NACK interrupt is generated before the I2C controller generates
+        * the STOP condition on the bus. So wait for 2 clock periods
+        * before disabling the controller so that the STOP condition has
+        * been delivered properly.
+        */
+       udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
        cnfg = i2c_readl(i2c_dev, I2C_CNFG);
        if (cnfg & I2C_CNFG_PACKET_MODE_EN)
                i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
        if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
                return 0;
 
-       /*
-        * NACK interrupt is generated before the I2C controller generates
-        * the STOP condition on the bus. So wait for 2 clock periods
-        * before resetting the controller so that the STOP condition has
-        * been delivered properly.
-        */
-       if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
-               udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
        tegra_i2c_init(i2c_dev);
        if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
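
The relocated udelay() converts two bus-clock periods into microseconds, so the wait now happens on every controller disable rather than only on NACK. Worked numbers for the two common rates (values assumed, not from the patch):

    /*
     * bus_clk_rate = 100000 (standard mode):
     *         DIV_ROUND_UP(2 * 1000000, 100000) = 20 us
     * bus_clk_rate = 400000 (fast mode):
     *         DIV_ROUND_UP(2 * 1000000, 400000) = 5 us
     * i.e. the delay always covers two full SCL periods, rounded up.
     */
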
index 1f41a4f89c08f4c4b2da8cf3f0feb2bd643e9b5e..8a873975cf1252006e91d437893b2aff2791e843 100644 (file)
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
        if (priv->len_recv) {
                /* read length byte */
                rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+               /*
+                * We expect at least 2 interrupts for I2C_M_RECV_LEN
+                * transactions. The length is updated during the first
+                * interrupt, and the buffer contents are only copied
+                * during subsequent interrupts. If the interrupts get
+                * merged, we would complete the transaction without
+                * copying the bytes out of the RX FIFO. To avoid this,
+                * we drain the FIFO whenever data is available.
+                * The rlen byte has already been drained, so decrement
+                * the total length by one.
+                */
+
+               len--;
                if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
                        rlen = 0;       /* abort transfer */
                        priv->msg_buf_remaining = 0;
                        priv->msg_len = 0;
-               } else {
-                       *buf++ = rlen;
-                       if (priv->client_pec)
-                               ++rlen; /* account for error check byte */
-                       /* update remaining bytes and message length */
-                       priv->msg_buf_remaining = rlen;
-                       priv->msg_len = rlen + 1;
+                       xlp9xx_i2c_update_rlen(priv);
+                       return;
                }
+
+               *buf++ = rlen;
+               if (priv->client_pec)
+                       ++rlen; /* account for error check byte */
+               /* update remaining bytes and message length */
+               priv->msg_buf_remaining = rlen;
+               priv->msg_len = rlen + 1;
                xlp9xx_i2c_update_rlen(priv);
                priv->len_recv = false;
-       } else {
-               len = min(priv->msg_buf_remaining, len);
-               for (i = 0; i < len; i++, buf++)
-                       *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-               priv->msg_buf_remaining -= len;
        }
 
+       len = min(priv->msg_buf_remaining, len);
+       for (i = 0; i < len; i++, buf++)
+               *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+       priv->msg_buf_remaining -= len;
        priv->msg_buf = buf;
 
        if (priv->msg_buf_remaining)
index 31d16ada6e7d9a789240cc62f50a7fcde840bb2e..15c95aaa484cfdd845111e27a69944939edeafe1 100644 (file)
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
                val = !val;
                bri->set_scl(adap, val);
-               ndelay(RECOVERY_NDELAY);
+
+               /*
+                * If we can set SDA, we will always create STOP here to ensure
+                * the additional pulses will do no harm. This is achieved by
+                * letting SDA follow SCL half a cycle later.
+                */
+               ndelay(RECOVERY_NDELAY / 2);
+               if (bri->set_sda)
+                       bri->set_sda(adap, val);
+               ndelay(RECOVERY_NDELAY / 2);
        }
 
        /* check if recovery actually succeeded */
@@ -615,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
 static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
                                 unsigned int flags)
 {
-       rt_mutex_lock(&adapter->bus_lock);
+       rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
 }
 
 /**
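
With the hunk above, each SCL toggle is split into two half-period delays and, when a set_sda callback exists, SDA mirrors SCL half a cycle later; SDA rising while SCL is already high is by definition a STOP condition, so the extra recovery pulses cannot be misread as data. One recovery cycle under those assumptions (RECOVERY_NDELAY is the driver-internal period from the patch; the helper name is ours):

    static void recovery_pulse(struct i2c_adapter *adap,
                               struct i2c_bus_recovery_info *bri, int val)
    {
            bri->set_scl(adap, val);         /* SCL edge first ... */
            ndelay(RECOVERY_NDELAY / 2);
            if (bri->set_sda)
                    bri->set_sda(adap, val); /* ... SDA follows half a cycle later */
            ndelay(RECOVERY_NDELAY / 2);
    }
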
index f3f683041e7f9199ad5799ef5c8fd83f59fc9856..51970bae3c4a5a4d08f03ae558816fd9c264996b 100644 (file)
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
 
        status = i2c_transfer(adapter, msg, num);
        if (status < 0)
-               return status;
-       if (status != num)
-               return -EIO;
+               goto cleanup;
+       if (status != num) {
+               status = -EIO;
+               goto cleanup;
+       }
+       status = 0;
 
        /* Check PEC if last message is a read */
        if (i && (msg[num-1].flags & I2C_M_RD)) {
                status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
                if (status < 0)
-                       return status;
+                       goto cleanup;
        }
 
        if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                }
 
+cleanup:
        if (msg[0].flags & I2C_M_DMA_SAFE)
                kfree(msg[0].buf);
        if (msg[1].flags & I2C_M_DMA_SAFE)
                kfree(msg[1].buf);
 
-       return 0;
+       return status;
 }
 
 /**
index 300ab4b672e4992921b164a40d77d9e426fda305..29646aa6132e997a46f73cad48d195eb58a7b9e4 100644 (file)
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
        struct i2c_mux_priv *priv = adapter->algo_data;
        struct i2c_adapter *parent = priv->muxc->parent;
 
-       rt_mutex_lock(&parent->mux_lock);
+       rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
        if (!(flags & I2C_LOCK_ROOT_ADAPTER))
                return;
        i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
        struct i2c_mux_priv *priv = adapter->algo_data;
        struct i2c_adapter *parent = priv->muxc->parent;
 
-       rt_mutex_lock(&parent->mux_lock);
+       rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
        i2c_lock_bus(parent, flags);
 }
 
index 7e3d82cff3d5f2537608c0a21d9bf277767e7bd8..c149c9c360fc4f265ce1e406e1dd8ba7ae5615d8 100644 (file)
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
        if (src < 0)
                return IRQ_NONE;
 
-       if (!(src & data->chip_info->enabled_events))
+       if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
                return IRQ_NONE;
 
        if (src & MMA8452_INT_DRDY) {
index f9c0624505a2993e3a48d9a581faa8a26e9287de..42618fe4f83ed82d0f50b92e884dd59a11a1df09 100644 (file)
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
        }
 
        irq_type = irqd_get_trigger_type(desc);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
        if (irq_type == IRQF_TRIGGER_RISING)
                st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
        else if (irq_type == IRQF_TRIGGER_FALLING)
index 34d42a2504c92bf43ee1216f1855e9febfd03161..df5b2a0da96c4a9c311ddd6da57f990f6d1f821f 100644 (file)
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
                        "%s: failed to get lux\n", __func__);
                return lux_val;
        }
+       if (lux_val == 0)
+               return -ERANGE;
 
        ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
                        lux_val;
index 5ec3e41b65f2b8f991626a4522d6263d66ca2a27..fe87d27779d96b99ce4f847c9a9e02a4a1a87aa7 100644 (file)
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
        }
        comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
-       *val = comp_humidity;
-       *val2 = 1024;
+       *val = comp_humidity * 1000 / 1024;
 
-       return IIO_VAL_FRACTIONAL;
+       return IIO_VAL_INT;
 }
 
 static int bmp280_read_raw(struct iio_dev *indio_dev,
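
The humidity fix works because the compensated value is in 1024ths of %RH (Q22.10 fixed point), while the IIO ABI expects relative humidity in milli-percent. A worked example with an assumed reading:

    /*
     * comp_humidity = 47445 counts = 47445 / 1024 = 46.333 %RH
     * *val = 47445 * 1000 / 1024 = 46333, reported as IIO_VAL_INT,
     * i.e. 46333 milli-%RH, the same 46.333 %RH in ABI units.
     */
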
index a6e904973ba8a08973340b66f608eadfa04e2893..475910ffbcb6800f2e729f012b795483cbcc5c15 100644 (file)
@@ -121,7 +121,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
         * this lock.
         */
        if (!exclusive)
-               return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+               return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
                        -EBUSY : 0;
 
        /* lock is either WRITE or DESTROY - should be exclusive */
index 3e90b6a1d9d2d6a203d13d945e9322a9ec154fe8..583d3a10b94057e64615d2b0de1b3a08dc2e8c70 100644 (file)
@@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file,
                goto release_qp;
        }
 
-       if ((cmd->base.attr_mask & IB_QP_AV) &&
-           !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
-               ret = -EINVAL;
-               goto release_qp;
+       if ((cmd->base.attr_mask & IB_QP_AV)) {
+               if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+                       ret = -EINVAL;
+                       goto release_qp;
+               }
+
+               if (cmd->base.attr_mask & IB_QP_STATE &&
+                   cmd->base.qp_state == IB_QPS_RTR) {
+               /* We are in the INIT->RTR transition (if we are not,
+                * this transition will be rejected in subsequent checks).
+                * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+                * but the IB_QP_STATE flag is required.
+                *
+                * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+                * when IB_QP_AV is set, has required inclusion of a valid
+                * port number in the primary AV. (AVs are created and handled
+                * differently for infiniband and ethernet (RoCE) ports).
+                *
+                * Check the port number included in the primary AV against
+                * the port number in the qp struct, which was set (and saved)
+                * in the RST->INIT transition.
+                */
+                       if (cmd->base.dest.port_num != qp->real_qp->port) {
+                               ret = -EINVAL;
+                               goto release_qp;
+                       }
+               } else {
+               /* We are in SQD->SQD. (If we are not, this transition will
+                * be rejected later in the verbs layer checks).
+                * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+                * together in the SQD->SQD transition.
+                *
+                * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+                * verbs layer driver does not track primary port changes
+                * resulting from path migration. Thus, in SQD, if the primary
+                * AV is modified, the primary port should also be modified).
+                *
+                * Note that in this transition, the IB_QP_STATE flag
+                * is not allowed.
+                */
+                       if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+                            == (IB_QP_AV | IB_QP_PORT)) &&
+                           cmd->base.port_num != cmd->base.dest.port_num) {
+                               ret = -EINVAL;
+                               goto release_qp;
+                       }
+                       if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+                           == IB_QP_AV) {
+                               cmd->base.attr_mask |= IB_QP_PORT;
+                               cmd->base.port_num = cmd->base.dest.port_num;
+                       }
+               }
        }
 
        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
            (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
-           !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
+           !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+           cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
                ret = -EINVAL;
                goto release_qp;
        }
@@ -3488,8 +3537,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        struct ib_flow_attr               *flow_attr;
        struct ib_qp                      *qp;
        struct ib_uflow_resources         *uflow_res;
+       struct ib_uverbs_flow_spec_hdr    *kern_spec;
        int err = 0;
-       void *kern_spec;
        void *ib_spec;
        int i;
 
@@ -3538,8 +3587,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                if (!kern_flow_attr)
                        return -ENOMEM;
 
-               memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-               err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+               *kern_flow_attr = cmd.flow_attr;
+               err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
                                         cmd.flow_attr.size);
                if (err)
                        goto err_free_attr;
@@ -3559,6 +3608,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                goto err_uobj;
        }
 
+       if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+               err = -EINVAL;
+               goto err_put;
+       }
+
        flow_attr = kzalloc(struct_size(flow_attr, flows,
                                cmd.flow_attr.num_of_specs), GFP_KERNEL);
        if (!flow_attr) {
@@ -3578,21 +3632,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);
 
-       kern_spec = kern_flow_attr + 1;
+       kern_spec = kern_flow_attr->flow_specs;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs &&
-            cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
-            cmd.flow_attr.size >=
-            ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
-               err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
-                                          uflow_res);
+                       cmd.flow_attr.size >= sizeof(*kern_spec) &&
+                       cmd.flow_attr.size >= kern_spec->size;
+            i++) {
+               err = kern_spec_to_ib_spec(
+                               file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+                               ib_spec, uflow_res);
                if (err)
                        goto err_free;
 
                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
-               cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
-               kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+               cmd.flow_attr.size -= kern_spec->size;
+               kern_spec = ((void *)kern_spec) + kern_spec->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
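
The rewritten loop walks a packed array of variable-size flow specs, checking before every step that the remaining byte count covers both the fixed header and the size that header claims. A generic sketch of that TLV-style walk, with a hypothetical header type standing in for ib_uverbs_flow_spec_hdr:

    struct spec_hdr {
            u32 type;
            u16 size;       /* total size of this spec, header included */
            u16 reserved;
    };

    static int walk_specs(void *buf, size_t len, int (*cb)(struct spec_hdr *))
    {
            struct spec_hdr *hdr = buf;

            while (len >= sizeof(*hdr) && len >= hdr->size) {
                    int err;

                    if (hdr->size < sizeof(*hdr))   /* refuse to loop forever */
                            return -EINVAL;
                    err = cb(hdr);
                    if (err)
                            return err;
                    len -= hdr->size;
                    hdr = (void *)hdr + hdr->size;
            }
            return len ? -EINVAL : 0;
    }
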
index 3ae2339dd27a9f5b6c4d104674d096f9f22b5b67..2094d136513d6c5f144663ad9e74192fd85a191a 100644 (file)
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (ret)
                return ret;
 
-       if (!file->ucontext &&
-           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
-               return -EINVAL;
-
        if (extended) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
+       /*
+        * Must be after the ib_dev check: once RCU has cleared ib_dev to
+        * NULL, ucontext is NULL as well.
+        */
+       if (!file->ucontext &&
+           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (!verify_command_mask(ib_dev, command, extended)) {
                ret = -EOPNOTSUPP;
                goto out;
index 0b56828c1319b1b350385dfd5be2c981d26d6ffa..9d6beb948535bec89545e9f9b25f7b83976a654e 100644 (file)
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
 
 /* Completion queues */
 
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller)
 {
        struct ib_cq *cq;
 
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
                cq->res.type = RDMA_RESTRACK_CQ;
+               cq->res.kern_name = caller;
                rdma_restrack_add(&cq->res);
        }
 
        return cq;
 }
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
 
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
index 1445918e32392f28ae4ce9ea74e7df0feeddf371..7b76e6f81aeb477181afedc2f44fec990ce3090f 100644 (file)
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-       if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+       if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
                return -ENOMEM;
 
        mhp->mpl[mhp->mpl_len++] = addr;
index 1a1a47ac53c6f049285a30028ba4cde5bd21d5af..f15c931020810cdbc6125898af42146337974cc8 100644 (file)
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
        lockdep_assert_held(&qp->s_lock);
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
index b7b671017e594298c8bea18d029fbd1154a8cfe9..e254dcec6f647067a0efce4cee6b47e9f76dbf9c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        int middle = 0;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
index 1ab332f1866e878580ddb683f71ac83e13d35cbb..70d39fc450a1e112b2f97b4e499cbf96623d19ad 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        u32 lid;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
index 873e48ea923fc42acc9cb2d5d3d7055dd07a1790..c4ab2d5b4502ee1e905ef2c193495f56e479eaf9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
 {
-       struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+       struct verbs_txreq *tx = NULL;
 
        write_seqlock(&dev->txwait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
index 729244c3086ce7eb7d28da104bb4f7f4363c96bf..1c19bbc764b2d6f93134fe7775f55569d7b70b84 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        if (unlikely(!tx)) {
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
-               if (IS_ERR(tx))
+               if (!tx)
                        return tx;
        }
        tx->qp = qp;
index ed1f253faf977c5bf3b4f15f4a1ea8992e817d88..c7c85c22e4e3291a343319ffcdb2e00034d7cc5f 100644 (file)
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
        }
 
        if (flags & IB_MR_REREG_ACCESS) {
-               if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-                       return -EPERM;
+               if (ib_access_writable(mr_access_flags) &&
+                   !mmr->umem->writable) {
+                       err = -EPERM;
+                       goto release_mpt_entry;
+               }
 
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
index e52dd21519b45ff00268ae33c21816a8b5a96b53..b3ba9a222550750f9c92a1ea8d1cf23b93e05d12 100644 (file)
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
        if (!mcounters->hw_cntrs_hndl) {
                mcounters->hw_cntrs_hndl = mlx5_fc_create(
                        to_mdev(ibcounters->device)->mdev, false);
-               if (!mcounters->hw_cntrs_hndl) {
-                       ret = -ENOMEM;
+               if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                       ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                        goto free;
                }
                hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                        return ERR_PTR(-ENOMEM);
 
                err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-               if (err) {
-                       kfree(ucmd);
-                       return ERR_PTR(err);
-               }
+               if (err)
+                       goto free_ucmd;
        }
 
-       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-               return ERR_PTR(-ENOMEM);
+       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > dev->num_ports ||
            (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                 IB_FLOW_ATTR_FLAGS_EGRESS)))
-               return ERR_PTR(-EINVAL);
+                                 IB_FLOW_ATTR_FLAGS_EGRESS))) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        if (is_egress &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-               return ERR_PTR(-EINVAL);
+            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (!dst)
-               return ERR_PTR(-ENOMEM);
+       if (!dst) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        mutex_lock(&dev->flow_db->lock);
 
@@ -3637,8 +3643,8 @@ destroy_ft:
 unlock:
        mutex_unlock(&dev->flow_db->lock);
        kfree(dst);
+free_ucmd:
        kfree(ucmd);
-       kfree(handler);
        return ERR_PTR(err);
 }
 
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                             MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-       if (MLX5_VPORT_MANAGER(mdev) &&
+       if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
index 0af7b7905550baddb5084d99293e9a36196eb6b3..f5de5adc9b1a4143b7d2c82f6e7fe59df1157f93 100644 (file)
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-       if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-               return ERR_PTR(-EINVAL);
+       if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
-       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-               return ERR_PTR(-EINVAL);
+       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
-       if (buf_size < desc_size)
-               return ERR_PTR(-EINVAL);
+       if (buf_size < desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        in.type = init_attr->srq_type;
 
        if (pd->uobject)
index f7ac8fc9b531d7550fb0b41233b55e0bec51b4ff..f07b8df96f43954e67d4dfc32148e96a751e6974 100644 (file)
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                       return -EINVAL;
+
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
index f30eeba3f772c5a8e0433cdc6b6fcaa47076583c..8be27238a86e4ee1f160b4058e9517ef58708d26 100644 (file)
@@ -645,6 +645,9 @@ next_wqe:
                } else {
                        goto exit;
                }
+               if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+                   qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+                       rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
@@ -709,6 +712,7 @@ next_wqe:
 
        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+               kfree_skb(skb);
                goto err;
        }
 
@@ -740,7 +744,6 @@ next_wqe:
        goto next_wqe;
 
 err:
-       kfree_skb(skb);
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);
index cf30523c6ef64c956e5ebf77c730c6bb146c4a1f..6c7326c93721c495c4e61a73cac2dfaf9a5bc8fc 100644 (file)
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
  * inactive, or if the tool type is changed, a new tracking id is
  * assigned to the slot. The tool type is only reported if the
  * corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
  */
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active)
 {
        struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
        int id;
 
        if (!mt)
-               return;
+               return false;
 
        slot = &mt->slots[mt->slot];
        slot->frame = mt->frame;
 
        if (!active) {
                input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-               return;
+               return false;
        }
 
        id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
-       if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+       if (id < 0)
                id = input_mt_new_trkid(mt);
 
        input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
        input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+       return true;
 }
 EXPORT_SYMBOL(input_mt_report_slot_state);
 
index 48e36acbeb496db7f5033029158a645f8d3cdb27..cd620e009bada3a8f8c1e70b99be25100bea9c44 100644 (file)
@@ -125,7 +125,7 @@ static const struct xpad_device {
        u8 mapping;
        u8 xtype;
 } xpad_device[] = {
-       { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+       { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
index f6e643b589b616c61d1751005fedb3288f0ad82c..e8dae6195b30500934f23738f995201521e05160 100644 (file)
@@ -45,7 +45,7 @@ struct event_dev {
 static irqreturn_t events_interrupt(int irq, void *dev_id)
 {
        struct event_dev *edev = dev_id;
-       unsigned type, code, value;
+       unsigned int type, code, value;
 
        type = __raw_readl(edev->addr + REG_READ);
        code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
 }
 
 static void events_import_bits(struct event_dev *edev,
-                       unsigned long bits[], unsigned type, size_t count)
+                       unsigned long bits[], unsigned int type, size_t count)
 {
        void __iomem *addr = edev->addr;
        int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
 
                for (j = 0; j < ARRAY_SIZE(val); j++) {
                        int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
                        val[j] = __raw_readl(edev->addr + REG_DATA + offset);
                }
 
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
        struct input_dev *input_dev;
        struct event_dev *edev;
        struct resource *res;
-       unsigned keymapnamelen;
+       unsigned int keymapnamelen;
        void __iomem *addr;
        int irq;
        int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
        for (i = 0; i < keymapnamelen; i++)
                edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
 
-       pr_debug("events_probe() keymap=%s\n", edev->name);
+       pr_debug("%s: keymap=%s\n", __func__, edev->name);
 
        input_dev->name = edev->name;
        input_dev->id.bustype = BUS_HOST;
index a4e404aaf64bdb822685519bbf6e8c01d329af45..5c7afdec192c139b4e65003a00d9549e1aea9b67 100644 (file)
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
  #define HIL_DATA              0x1
  #define HIL_CMD               0x3
  #define HIL_IRQ               2
- #define hil_readb(p)          readb(p)
- #define hil_writeb(v,p)       writeb((v),(p))
+ #define hil_readb(p)          readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p)      writeb((v), (volatile void __iomem *)(p))
 
 #else
 #error "HIL is not supported on this platform"
index c25606e006938743d64498429cf3d0b69768d7fb..ca59a2be9bc5344f65389ea7372a7740b74b5343 100644 (file)
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
          To compile this driver as a module, choose M here: the
          module will be called rave-sp-pwrbutton.
 
+config INPUT_SC27XX_VIBRA
+       tristate "Spreadtrum sc27xx vibrator support"
+       depends on MFD_SC27XX_PMIC || COMPILE_TEST
+       select INPUT_FF_MEMLESS
+       help
+         This option enables support for the Spreadtrum sc27xx vibrator
+         driver.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sc27xx-vibra.
+
 endif
index 72cde28649e2c0bc4fec14f6898445d2f79880dc..9d0f9d1ff68f41a5ec7f13101bb11176e8fd8729 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON)    += retu-pwrbutton.o
 obj-$(CONFIG_INPUT_AXP20X_PEK)         += axp20x-pek.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)        += rotary_encoder.o
 obj-$(CONFIG_INPUT_RK805_PWRKEY)       += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA)       += sc27xx-vibra.o
 obj-$(CONFIG_INPUT_SGI_BTNS)           += sgi_btns.o
 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY)      += sirfsoc-onkey.o
 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY)   += soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644 (file)
index 0000000..295251a
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL                GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN      BIT(9)
+#define LDO_VIBR_PD            BIT(8)
+
+struct vibra_info {
+       struct input_dev        *input_dev;
+       struct work_struct      play_work;
+       struct regmap           *regmap;
+       u32                     base;
+       u32                     strength;
+       bool                    enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+       if (on) {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, 0);
+               info->enabled = true;
+       } else {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+                                  LDO_VIBR_PD);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+               info->enabled = false;
+       }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+       return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+       struct vibra_info *info = container_of(work, struct vibra_info,
+                                              play_work);
+
+       if (info->strength && !info->enabled)
+               sc27xx_vibra_set(info, true);
+       else if (info->strength == 0 && info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+                            struct ff_effect *effect)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       info->strength = effect->u.rumble.weak_magnitude;
+       schedule_work(&info->play_work);
+
+       return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       cancel_work_sync(&info->play_work);
+       if (info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+       struct vibra_info *info;
+       int error;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!info->regmap) {
+               dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+               return -ENODEV;
+       }
+
+       error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+       if (error) {
+               dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+               return error;
+       }
+
+       info->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!info->input_dev) {
+               dev_err(&pdev->dev, "failed to allocate input device.\n");
+               return -ENOMEM;
+       }
+
+       info->input_dev->name = "sc27xx:vibrator";
+       info->input_dev->id.version = 0;
+       info->input_dev->close = sc27xx_vibra_close;
+
+       input_set_drvdata(info->input_dev, info);
+       input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+       INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+       info->enabled = false;
+
+       error = sc27xx_vibra_hw_init(info);
+       if (error) {
+               dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+               return error;
+       }
+
+       error = input_ff_create_memless(info->input_dev, NULL,
+                                       sc27xx_vibra_play);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+               return error;
+       }
+
+       error = input_register_device(info->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register input device.\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+       { .compatible = "sprd,sc2731-vibrator", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+       .driver = {
+               .name = "sc27xx-vibrator",
+               .of_match_table = sc27xx_vibra_of_match,
+       },
+       .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
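
Since the driver registers through input_ff_create_memless(), userspace drives it with the stock evdev force-feedback interface, and weak_magnitude is the only effect field the play callback reads. A sketch of a userspace caller (the device path and helper name are hypothetical):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    int buzz(const char *path)
    {
            struct ff_effect fx;
            struct input_event play = { .type = EV_FF, .value = 1 };
            int fd = open(path, O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&fx, 0, sizeof(fx));
            fx.type = FF_RUMBLE;
            fx.id = -1;                           /* let the kernel pick a slot */
            fx.u.rumble.weak_magnitude = 0x8000;  /* the only knob this driver reads */
            fx.replay.length = 500;               /* ms */

            if (ioctl(fd, EVIOCSFF, &fx) < 0)
                    return -1;

            play.code = fx.id;
            if (write(fd, &play, sizeof(play)) != sizeof(play))
                    return -1;
            usleep(500 * 1000);
            close(fd);
            return 0;
    }
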
index 599544c1a91cd365261b6ca2ec4e4f3149b0a63d..243e0fa6e3e3cb44ce22adc6e76421fda79f4ff2 100644 (file)
@@ -27,6 +27,8 @@
 #define ETP_DISABLE_POWER      0x0001
 #define ETP_PRESSURE_OFFSET    25
 
+#define ETP_CALIBRATE_MAX_LEN  3
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING   "%d.0"
 #define ETP_FW_NAME            "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
index 8ff75114e7626dc3d2fa23a1d3457f1802b2a628..f5ae24865355a3292ae8a8efd713746b628cacb0 100644 (file)
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
        int tries = 20;
        int retval;
        int error;
-       u8 val[3];
+       u8 val[ETP_CALIBRATE_MAX_LEN];
 
        retval = mutex_lock_interruptible(&data->sysfs_mutex);
        if (retval)
@@ -1345,6 +1345,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0618", 0 },
+       { "ELAN061D", 0 },
+       { "ELAN0622", 0 },
        { "ELAN1000", 0 },
        { }
 };
index cfcb32559925baf1acf070f908f3b91b1fc1b905..c060d270bc4d862ad7366bd87529dbdc032672b6 100644 (file)
@@ -56,7 +56,7 @@
 static int elan_smbus_initialize(struct i2c_client *client)
 {
        u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
-       u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+       u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
        int len, error;
 
        /* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
 {
        int error;
+       u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+       BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
 
        error = i2c_smbus_read_block_data(client,
-                                         ETP_SMBUS_CALIBRATE_QUERY, val);
+                                         ETP_SMBUS_CALIBRATE_QUERY, buf);
        if (error < 0)
                return error;
 
+       memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
        return 0;
 }
 
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
 {
        int len;
 
+       BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
        len = i2c_smbus_read_block_data(client,
                                        ETP_SMBUS_PACKET_QUERY,
                                        &report[ETP_SMBUS_REPORT_OFFSET]);
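
Both elan_i2c_smbus fixes enforce the same rule: i2c_smbus_read_block_data() may deposit up to I2C_SMBUS_BLOCK_MAX (32) bytes into its buffer no matter how few the caller wants, so the landing buffer must always be full size and the caller copies out only what it needs. The safe pattern, sketched with hypothetical cmd/want parameters:

    static int read_block_safely(struct i2c_client *client, u8 cmd,
                                 u8 *out, size_t want)
    {
            u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };  /* full 32-byte landing zone */
            int len = i2c_smbus_read_block_data(client, cmd, buf);

            if (len < 0)
                    return len;

            memcpy(out, buf, min_t(size_t, want, len));
            return 0;
    }
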
index fb4d902c440345d3cbc02329ed742d48b931dc85..dd85b16dc6f889bb366a10cbd4278234d3f9763c 100644 (file)
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
                sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
-               sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+               sanity_check = ((packet[0] & 0x08) == 0x00 &&
                                (packet[3] & 0x1c) == 0x10);
 
        if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
        { }
 };
 
+static const char * const middle_button_pnp_ids[] = {
+       "LEN2131", /* ThinkPad P52 w/ NFC */
+       "LEN2132", /* ThinkPad P52 */
+       NULL
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        __clear_bit(EV_REL, dev->evbit);
 
        __set_bit(BTN_LEFT, dev->keybit);
-       if (dmi_check_system(elantech_dmi_has_middle_button))
+       if (dmi_check_system(elantech_dmi_has_middle_button) ||
+                       psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
                __set_bit(BTN_MIDDLE, dev->keybit);
        __set_bit(BTN_RIGHT, dev->keybit);
 
index 5ff5b1952be0c7afe810cef7f6f086f71928e150..d3ff1fc09af712700507d05ac3548703e49173a1 100644 (file)
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                        else
                                input_report_rel(dev, REL_WHEEL, -wheel);
 
-                       input_report_key(dev, BTN_SIDE,  BIT(4));
-                       input_report_key(dev, BTN_EXTRA, BIT(5));
+                       input_report_key(dev, BTN_SIDE,  packet[3] & BIT(4));
+                       input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
                        break;
                }
                break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
 
                /* Extra buttons on Genius NewNet 3D */
-               input_report_key(dev, BTN_SIDE,  BIT(6));
-               input_report_key(dev, BTN_EXTRA, BIT(7));
+               input_report_key(dev, BTN_SIDE,  packet[0] & BIT(6));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
                break;
 
        case PSMOUSE_THINKPS:
                /* Extra button on ThinkingMouse */
-               input_report_key(dev, BTN_EXTRA, BIT(3));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
 
                /*
                 * Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                 * Cortron PS2 Trackball reports SIDE button in the
                 * 4th bit of the first byte.
                 */
-               input_report_key(dev, BTN_SIDE, BIT(3));
+               input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
                packet[0] |= BIT(3);
                break;
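
For clarity on the four fixes above: BIT(n) expands to a nonzero constant (BIT(4) is 0x10), so input_report_key() previously saw a "pressed" value on every packet regardless of the button's real state; masking the data byte, as in packet[3] & BIT(4), yields zero or nonzero according to the actual bit, so both press and release now reach the input core.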
 
index 7172b88cd0649c8de16ac7373ec2a16c42c338d2..fad2eae4a118e793e617a86a52b28351ef4fafed 100644 (file)
@@ -3,6 +3,7 @@
 #
 config RMI4_CORE
        tristate "Synaptics RMI4 bus support"
+       select IRQ_DOMAIN
        help
          Say Y here if you want to support the Synaptics RMI4 bus.  This is
          required for all RMI4 device support.
index 8bb866c7b9855c5025d31b7be3f722d469f73da9..8eeffa066022dadb9f718f77aab1609700f05543 100644 (file)
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
        if (obj->type == RMI_2D_OBJECT_NONE)
                return;
 
-       if (axis_align->swap_axes)
-               swap(obj->x, obj->y);
-
        if (axis_align->flip_x)
                obj->x = sensor->max_x - obj->x;
 
        if (axis_align->flip_y)
                obj->y = sensor->max_y - obj->y;
 
+       if (axis_align->swap_axes)
+               swap(obj->x, obj->y);
+
        /*
         * Here checking if X offset or y offset are specified is
         * redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
        x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
        y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
 
-       if (axis_align->swap_axes)
-               swap(x, y);
-
        if (axis_align->flip_x)
                x = min(RMI_2D_REL_POS_MAX, -x);
 
        if (axis_align->flip_y)
                y = min(RMI_2D_REL_POS_MAX, -y);
 
+       if (axis_align->swap_axes)
+               swap(x, y);
+
        if (x || y) {
                input_report_rel(sensor->input, REL_X, x);
                input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
        struct input_dev *input = sensor->input;
        int res_x;
        int res_y;
+       int max_x, max_y;
        int input_flags = 0;
 
        if (sensor->report_abs) {
-               if (sensor->axis_align.swap_axes) {
-                       swap(sensor->max_x, sensor->max_y);
-                       swap(sensor->axis_align.clip_x_low,
-                            sensor->axis_align.clip_y_low);
-                       swap(sensor->axis_align.clip_x_high,
-                            sensor->axis_align.clip_y_high);
-               }
-
                sensor->min_x = sensor->axis_align.clip_x_low;
                if (sensor->axis_align.clip_x_high)
                        sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
                                sensor->axis_align.clip_y_high);
 
                set_bit(EV_ABS, input->evbit);
-               input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
-                                       0, 0);
-               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
-                                       0, 0);
+
+               max_x = sensor->max_x;
+               max_y = sensor->max_y;
+               if (sensor->axis_align.swap_axes)
+                       swap(max_x, max_y);
+               input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
 
                if (sensor->x_mm && sensor->y_mm) {
                        res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
                        res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+                       if (sensor->axis_align.swap_axes)
+                               swap(res_x, res_y);
 
                        input_abs_set_res(input, ABS_X, res_x);
                        input_abs_set_res(input, ABS_Y, res_y);
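
A worked example of the reordering, with illustrative numbers: on a 1000x500 sensor with flip_x and swap_axes both set, a contact at (x = 100, y = 200) is first flipped to x = 900 and then swapped, so the reported position is X = 200, Y = 900. This matches the generic touchscreen binding, where swapping is defined to happen after the axis inversions; the old code swapped first, so the inversion flags acted on the already-swapped coordinates.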
index c5fa53adba8d01318cfeacea440360c51c044a7d..bd0d5ff01b08f9c88920b03f56dbb4a3eed21af3 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
 {}
 #endif
 
+static struct irq_chip rmi_irq_chip = {
+       .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+                                  struct rmi_function_handler *handler)
+{
+       struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+       int i, error;
+
+       for (i = 0; i < fn->num_of_irqs; i++) {
+               set_bit(fn->irq_pos + i, fn->irq_mask);
+
+               fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+                                               fn->irq_pos + i);
+
+               irq_set_chip_data(fn->irq[i], fn);
+               irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+                                        handle_simple_irq);
+               irq_set_nested_thread(fn->irq[i], 1);
+
+               error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+                                       handler->attention, IRQF_ONESHOT,
+                                       dev_name(&fn->dev), fn);
+               if (error) {
+                       dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
 static int rmi_function_probe(struct device *dev)
 {
        struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
 
        if (handler->probe) {
                error = handler->probe(fn);
-               return error;
+               if (error)
+                       return error;
+       }
+
+       if (fn->num_of_irqs && handler->attention) {
+               error = rmi_create_function_irq(fn, handler);
+               if (error)
+                       return error;
        }
 
        return 0;
@@ -230,12 +272,18 @@ err_put_device:
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+       int i;
+
        rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
                        fn->fd.function_number);
 
        device_del(&fn->dev);
        of_node_put(fn->dev.of_node);
        put_device(&fn->dev);
+
+       for (i = 0; i < fn->num_of_irqs; i++)
+               irq_dispose_mapping(fn->irq[i]);
+
 }
 
 /**
index b7625a9ac66ab5384727cc83496223be3aedbe92..96383eab41ba1d850468a64e7ced8a3f1bf72ff6 100644 (file)
 
 struct rmi_device;
 
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS        6
+
 /**
  * struct rmi_function - represents the implementation of an RMI4
  * function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
  * @irq_pos: The position in the irq bitfield this function holds
  * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
  * interrupt handling.
+ * @irq: assigned virq numbers (up to num_of_irqs)
  *
  * @node: entry in device's list of functions
  */
@@ -36,6 +43,7 @@ struct rmi_function {
        struct list_head node;
 
        unsigned int num_of_irqs;
+       int irq[RMI_FN_MAX_IRQS];
        unsigned int irq_pos;
        unsigned long irq_mask[];
 };
@@ -76,7 +84,7 @@ struct rmi_function_handler {
        void (*remove)(struct rmi_function *fn);
        int (*config)(struct rmi_function *fn);
        int (*reset)(struct rmi_function *fn);
-       int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+       irqreturn_t (*attention)(int irq, void *ctx);
        int (*suspend)(struct rmi_function *fn);
        int (*resume)(struct rmi_function *fn);
 };
index 7d29053dfb0f06878ff7897b59f52039a299a089..fc3ab93b7aea454475ee324eecee91470c4a9dc3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <uapi/linux/input.h>
 #include <linux/rmi.h>
 #include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
        return 0;
 }
 
-static void process_one_interrupt(struct rmi_driver_data *data,
-                                 struct rmi_function *fn)
-{
-       struct rmi_function_handler *fh;
-
-       if (!fn || !fn->dev.driver)
-               return;
-
-       fh = to_rmi_function_handler(fn->dev.driver);
-       if (fh->attention) {
-               bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
-                               data->irq_count);
-               if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
-                       fh->attention(fn, data->fn_irq_bits);
-       }
-}
-
 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
-       struct rmi_function *entry;
+       int i;
        int error;
 
        if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
         */
        mutex_unlock(&data->irq_mutex);
 
-       /*
-        * It would be nice to be able to use irq_chip to handle these
-        * nested IRQs.  Unfortunately, most of the current customers for
-        * this driver are using older kernels (3.0.x) that don't support
-        * the features required for that.  Once they've shifted to more
-        * recent kernels (say, 3.3 and higher), this should be switched to
-        * use irq_chip.
-        */
-       list_for_each_entry(entry, &data->function_list, node)
-               process_one_interrupt(data, entry);
+       for_each_set_bit(i, data->irq_status, data->irq_count)
+               handle_nested_irq(irq_find_mapping(data->irqdomain, i));
 
        if (data->input)
                input_sync(data->input);
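
Stepping back, the hunks in this file and in rmi_bus.c replace the hand-rolled dispatch with the standard irqdomain flow: probe creates a linear domain sized by the PDT scan, rmi_create_function_irq() gives each interrupt source of each function its own virq via irq_create_mapping() plus a devm-managed threaded handler, and the attention path above simply walks the set status bits and calls handle_nested_irq() on the mapped virq.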
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
 static int rmi_driver_remove(struct device *dev)
 {
        struct rmi_device *rmi_dev = to_rmi_device(dev);
+       struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
        rmi_disable_irq(rmi_dev, false);
 
+       irq_domain_remove(data->irqdomain);
+       data->irqdomain = NULL;
+
        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);
 
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+       int irq_count = 0;
        size_t size;
        int retval;
 
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
         * being accessed.
         */
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
-       irq_count = 0;
        data->bootloader_mode = false;
 
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
        if (data->bootloader_mode)
                dev_warn(dev, "Device in bootloader mode.\n");
 
+       /* Allocate and register a linear revmap irq_domain */
+       data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+                                                  &irq_domain_simple_ops,
+                                                  data);
+       if (!data->irqdomain) {
+               dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
+       }
+
        data->irq_count = irq_count;
        data->num_of_irq_regs = (data->irq_count + 7) / 8;
 
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       int irq_count = 0;
        int retval;
 
-       irq_count = 0;
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
        if (retval < 0) {
index 8a07ae147df690ee7796c3f9f897904fce6ac6dd..4edaa14fe878650c81e6267550869f8acc714b40 100644 (file)
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f01_attention(struct rmi_function *fn,
-                            unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int error;
        u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
        if (error) {
                dev_err(&fn->dev,
                        "Failed to read device status: %d.\n", error);
-               return error;
+               return IRQ_RETVAL(error);
        }
 
        if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
                error = rmi_dev->driver->reset_handler(rmi_dev);
                if (error) {
                        dev_err(&fn->dev, "Device reset failed: %d\n", error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 struct rmi_function_handler rmi_f01_handler = {
index 88822196d6b723fcf69efd9c3b685fb92dedcf7b..aaa1edc9552254609c1e2ba00008b48bf80f3a85 100644 (file)
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                /* First grab the data passed by the transport device */
                if (drvdata->attn_data.size < ob_len) {
                        dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
 
                memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                "%s: Failed to read F03 output buffers: %d\n",
                                __func__, error);
                        serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                serio_interrupt(f03->serio, ob_data, serio_flags);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static void rmi_f03_remove(struct rmi_function *fn)
index 12a233251793c24c754224ae1b379de52db34e7d..df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56 100644 (file)
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 }
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
-                                  struct rmi_2d_sensor *sensor,
-                                  unsigned long *irq_bits, int num_irq_regs,
-                                  int size)
+                                  struct rmi_2d_sensor *sensor, int size)
 {
        const u8 *f_state = f11->data.f_state;
        u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
        int rel_fingers;
        int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
-       int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
-                                 num_irq_regs * 8);
-       int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
-                                 num_irq_regs * 8);
-
-       if (abs_bits) {
+       if (sensor->report_abs) {
                if (abs_size > size)
                        abs_fingers = size / RMI_F11_ABS_BYTES;
                else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                        rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
                                                        finger_state, i);
                }
-       }
 
-       if (rel_bits) {
-               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
-                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
-               else
-                       rel_fingers = sensor->nbr_fingers;
-
-               for (i = 0; i < rel_fingers; i++)
-                       rmi_f11_rel_pos_report(f11, i);
-       }
-
-       if (abs_bits) {
                /*
                 * the absolute part is made in 2 parts to allow the kernel
                 * tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                }
 
                input_mt_sync_frame(sensor->input);
+       } else if (sensor->report_rel) {
+               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+               else
+                       rel_fingers = sensor->nbr_fingers;
+
+               for (i = 0; i < rel_fingers; i++)
+                       rmi_f11_rel_pos_report(f11, i);
        }
+
 }
 
 static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                data_base_addr, f11->sensor.data_pkt,
                                f11->sensor.pkt_size);
                if (error < 0)
-                       return error;
+                       return IRQ_RETVAL(error);
        }
 
-       rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-                               drvdata->num_of_irq_regs, valid_bytes);
+       rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f11_resume(struct rmi_function *fn)
index a3d1aa88f2a9ce27fcd1f89d2f87d58b21686fce..5c7f489157792bf32da34e982b715824ec17eaff 100644 (file)
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
                rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
 }
 
-static int rmi_f12_attention(struct rmi_function *fn,
-                            unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 {
        int retval;
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
                if (retval < 0) {
                        dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
                                retval);
-                       return retval;
+                       return IRQ_RETVAL(retval);
                }
        }
 
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
 
        input_mt_sync_frame(sensor->input);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f12_write_control_regs(struct rmi_function *fn)
index 82e0f0d43d55271c92c774ba325b1bc40099f83e..5e3ed5ac0c3e40b3919b59493293720877907f1a 100644 (file)
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
        }
 }
 
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f30_data *f30 = dev_get_drvdata(&fn->dev);
        struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
        int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                if (drvdata->attn_data.size < f30->register_count) {
                        dev_warn(&fn->dev,
                                 "F30 interrupted, but data is missing\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
                memcpy(f30->data_regs, drvdata->attn_data.data,
                        f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        dev_err(&fn->dev,
                                "%s: Failed to read F30 data registers: %d\n",
                                __func__, error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        rmi_f03_commit_buttons(f30->f03);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f30_config(struct rmi_function *fn)
index f1f5ac539d5d56b2d554e2aa7bdb50fd0af0d5e3..87a7d4ba382d7210b294f8168adb5083c15b80ee 100644 (file)
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
        return 0;
 }
 
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f34_data *f34 = dev_get_drvdata(&fn->dev);
        int ret;
        u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        complete(&f34->v7.cmd_done);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
index e8a59d1640192b75e6f83db0e2ad355064c19ff9..a6f515bcab2228a8783f10dbf10fae30462fd852 100644 (file)
@@ -610,11 +610,6 @@ error:
        mutex_unlock(&f54->data_mutex);
 }
 
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
-       return 0;
-}
-
 static int rmi_f54_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
        .func = 0x54,
        .probe = rmi_f54_probe,
        .config = rmi_f54_config,
-       .attention = rmi_f54_attention,
        .remove = rmi_f54_remove,
 };
index b353d494ad404888bd2884527fe771937cb1416f..136f6e7bf797767256e66c1c083cb80c55cd7a1b 100644 (file)
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
                },
        },
+       {
+               /* Lenovo LaVie Z */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+               },
+       },
        { }
 };
 
index ff7043f74a3d32286a6b8cdbed91f1bc3f0be12f..d196ac3d8b8cda8e1cf405101ed5603473db821d 100644 (file)
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
        { "GSL3692", 0 },
        { "MSSL1680", 0 },
        { "MSSL0001", 0 },
+       { "MSSL0002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
index e055d228bfb94057893a8a080dd7bbc709aeb6bf..689ffe5383706dd062cd9ce7aac9bc631c7656ba 100644 (file)
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
        bool "Support for Intel IOMMU using DMA Remapping Devices"
        depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-       select DMA_DIRECT_OPS
        select IOMMU_API
        select IOMMU_IOVA
        select NEED_DMA_MAP_STATE
index 14e4b37224284976a1cb8890e5d13ae5337350cc..115ff26e9cede3494a75d59ee7c87655c72f4090 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -485,14 +484,37 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
-#define ecs_enabled(iommu)     (intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu)   (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                           (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits is set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) &&                    \
+                             (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
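
Reading the macros concretely (an illustrative walk-through, not text from the patch): an IOMMU advertising the legacy bit 28 (ecap_broken_pasid) fails the ecs_enabled() test unless the user booted with intel_iommu=pasid28, so ECS, and with it SVM, stays off on the affected Broadwell/Skylake parts by default. Once ecs_enabled() passes, pasid_enabled() accepts either the old or the new PASID capability bit.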
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable extended context table support\n");
                        intel_iommu_ecs = 0;
+               } else if (!strncmp(str, "pasid28", 7)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: enable pre-production PASID support\n");
+                       intel_iommu_pasid28 = 1;
+                       iommu_identity_mapping |= IDENTMAP_GFX;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -3713,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
 {
-       void *vaddr;
+       struct page *page = NULL;
+       int order;
 
-       vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-       if (iommu_no_mapping(dev) || !vaddr)
-               return vaddr;
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
 
-       *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-                       PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-                       dev->coherent_dma_mask);
-       if (!*dma_handle)
-               goto out_free_pages;
-       return vaddr;
+       if (!iommu_no_mapping(dev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+               if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
+
+       if (gfpflags_allow_blocking(flags)) {
+               unsigned int count = size >> PAGE_SHIFT;
+
+               page = dma_alloc_from_contiguous(dev, count, order, flags);
+               if (page && iommu_no_mapping(dev) &&
+                   page_to_phys(page) + size > dev->coherent_dma_mask) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+
+       if (!page)
+               page = alloc_pages(flags, order);
+       if (!page)
+               return NULL;
+       memset(page_address(page), 0, size);
+
+       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+                                        DMA_BIDIRECTIONAL,
+                                        dev->coherent_dma_mask);
+       if (*dma_handle)
+               return page_address(page);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 
-out_free_pages:
-       dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
        return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t dma_handle, unsigned long attrs)
 {
-       if (!iommu_no_mapping(dev))
-               intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-       dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+       int order;
+       struct page *page = virt_to_page(vaddr);
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       intel_unmap(dev, dma_handle, size);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
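
One detail of the reworked allocator worth spelling out: when the IOMMU will provide a mapping (!iommu_no_mapping()), the GFP_DMA/GFP_DMA32 zone restrictions are dropped, because __intel_map_single() places the IOVA under the device's coherent mask no matter where the pages physically sit; only in the identity-mapped case must the physical address itself honor the mask, hence the zone selection and the extra check that a CMA page does not exceed coherent_dma_mask.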
index e9233db16e039f08c43083eb99cc4bf3a2d744ae..d564d21245c5c3aa7ccafc39dc0964170e1de6e0 100644 (file)
@@ -8,7 +8,7 @@ config ARM_GIC
        bool
        select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
 config ARM_GIC_V3
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select IRQ_DOMAIN_HIERARCHY
        select PARTITION_PERCPU
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
 config ARM_VIC
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config ARM_VIC_NR
        int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config ATMEL_AIC5_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
 config FARADAY_FTINTC010
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
        bool
        depends on ARCH_CLPS711X
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
        default y
 
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
 config ORION_IRQCHIP
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config PIC32_EVIC
        bool
index 0f52d44b3f6997c8c9e4e6f6f1a7da7b43d3e7c5..f5fe0100f9ffd043d251d96ce473775bfdafd3b4 100644 (file)
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
 }
 
index 4eca5c763766b50824734ae9af88da824badfeb2..606efa64adff5bb58d2f0f40ab4067aa14bb2f48 100644 (file)
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
         */
        info->scratchpad[0].ul = mc_bus_dev->icid;
        msi_info = msi_get_domain_info(msi_domain->parent);
+
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
 }
 
index 25a98de5cfb2831fa61d33fd3be3ef30d3f4e534..8d6d009d1d586f9271c508138bdbc55c064c8f9e 100644 (file)
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 {
        struct pci_dev *pdev, *alias_dev;
        struct msi_domain_info *msi_info;
-       int alias_count = 0;
+       int alias_count = 0, minnvec = 1;
 
        if (!dev_is_pci(dev))
                return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
-       return msi_info->ops->msi_prepare(domain->parent,
-                                         dev, max(nvec, alias_count), info);
+       /*
+        * Always allocate a power of 2, and special case device 0 for
+        * broken systems where the DevID is not wired (and all devices
+        * appear as DevID 0). For that reason, we generously allocate a
+        * minimum of 32 MSIs for DevID 0. If you want more because all
+        * your devices are aliasing to DevID 0, consider fixing your HW.
+        */
+       nvec = max(nvec, alias_count);
+       if (!info->scratchpad[0].ul)
+               minnvec = 32;
+       nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+       return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
 }
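
Worked through with illustrative numbers: a normal device asking for nvec = 5 with no aliases gets roundup_pow_of_two(5) = 8 vectors, while the same request arriving as the not-wired DevID 0 gets max_t(int, 32, 8) = 32. The fsl-mc hook above and the platform-MSI hook below apply the same minimum-32, power-of-two rule unconditionally.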
 
 static struct msi_domain_ops its_pci_msi_ops = {
index 8881a053c173edfdb11ad322b9b60f9b61ef57aa..7b8e87b493fe5defd6ebd5968f92c856c8fdcc8c 100644 (file)
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = dev_id;
 
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(domain->parent,
                                          dev, nvec, info);
 }
index 5377d7e2afba62b518671267b5d29c4963c2e5e6..316a57530f6d108ace5345f0e4077e8f20771924 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/dma-iommu.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/log2.h>
 #include <linux/mm.h>
 #include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
 } vpe_proxy;
 
 static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
@@ -182,6 +184,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
        return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+       if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+               return NULL;
+
+       return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+       if (valid_col(its->collections + vpe->col_idx))
+               return vpe;
+
+       return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -439,7 +457,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +476,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +494,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +512,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +530,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +548,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +572,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vinvall_cmd.vpe;
+       return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +594,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +617,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapti_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +640,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovi_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +658,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1405,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
        .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
 };
 
+
 /*
  * How we allocate LPIs:
  *
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
  *
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
  */
-#define IRQS_PER_CHUNK_SHIFT   5
-#define IRQS_PER_CHUNK         (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+       struct list_head        entry;
+       u32                     base_id;
+       u32                     span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+       struct lpi_range *range;
+
+       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       if (range) {
+               INIT_LIST_HEAD(&range->entry);
+               range->base_id = base;
+               range->span = span;
+       }
 
-static int its_lpi_to_chunk(int lpi)
+       return range;
+}
+
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
-       return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+       struct lpi_range *ra, *rb;
+
+       ra = container_of(a, struct lpi_range, entry);
+       rb = container_of(b, struct lpi_range, entry);
+
+       return rb->base_id - ra->base_id;
 }
 
-static int its_chunk_to_lpi(int chunk)
+static void merge_lpi_ranges(void)
 {
-       return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+       struct lpi_range *range, *tmp;
+
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (!list_is_last(&range->entry, &lpi_range_list) &&
+                   (tmp->base_id == (range->base_id + range->span))) {
+                       tmp->base_id = range->base_id;
+                       tmp->span += range->span;
+                       list_del(&range->entry);
+                       kfree(range);
+               }
+       }
 }
 
-static int __init its_lpi_init(u32 id_bits)
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
 {
-       lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+       struct lpi_range *range, *tmp;
+       int err = -ENOSPC;
 
-       lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
-                            GFP_KERNEL);
-       if (!lpi_bitmap) {
-               lpi_chunks = 0;
-               return -ENOMEM;
+       mutex_lock(&lpi_range_lock);
+
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (range->span >= nr_lpis) {
+                       *base = range->base_id;
+                       range->base_id += nr_lpis;
+                       range->span -= nr_lpis;
+
+                       if (range->span == 0) {
+                               list_del(&range->entry);
+                               kfree(range);
+                       }
+
+                       err = 0;
+                       break;
+               }
        }
 
-       pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
-       return 0;
+       mutex_unlock(&lpi_range_lock);
+
+       pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+       return err;
 }
 
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
 {
-       unsigned long *bitmap = NULL;
-       int chunk_id;
-       int nr_chunks;
-       int i;
+       struct lpi_range *new;
+       int err = 0;
 
-       nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+       mutex_lock(&lpi_range_lock);
 
-       spin_lock(&lpi_lock);
+       new = mk_lpi_range(base, nr_lpis);
+       if (!new) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       list_add(&new->entry, &lpi_range_list);
+       list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+       merge_lpi_ranges();
+out:
+       mutex_unlock(&lpi_range_lock);
+       return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+       u32 lpis = (1UL << id_bits) - 8192;
+       u32 numlpis;
+       int err;
+
+       numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+       if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+               lpis = numlpis;
+               pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+                       lpis);
+       }
+
+       /*
+        * Initializing the allocator is just the same as freeing the
+        * full range of LPIs.
+        */
+       err = free_lpi_range(8192, lpis);
+       pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+       return err;
+}
+
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+       unsigned long *bitmap = NULL;
+       int err = 0;
 
        do {
-               chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
-                                                     0, nr_chunks, 0);
-               if (chunk_id < lpi_chunks)
+               err = alloc_lpi_range(nr_irqs, base);
+               if (!err)
                        break;
 
-               nr_chunks--;
-       } while (nr_chunks > 0);
+               nr_irqs /= 2;
+       } while (nr_irqs > 0);
 
-       if (!nr_chunks)
+       if (err)
                goto out;
 
-       bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
-                        sizeof(long),
-                        GFP_ATOMIC);
+       bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
-       for (i = 0; i < nr_chunks; i++)
-               set_bit(chunk_id + i, lpi_bitmap);
-
-       *base = its_chunk_to_lpi(chunk_id);
-       *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+       *nr_ids = nr_irqs;
 
 out:
-       spin_unlock(&lpi_lock);
-
        if (!bitmap)
                *base = *nr_ids = 0;
 
        return bitmap;
 }
 
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
 {
-       int lpi;
-
-       spin_lock(&lpi_lock);
-
-       for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
-               int chunk = its_lpi_to_chunk(lpi);
-
-               BUG_ON(chunk > lpi_chunks);
-               if (test_bit(chunk, lpi_bitmap)) {
-                       clear_bit(chunk, lpi_bitmap);
-               } else {
-                       pr_err("Bad LPI chunk %d\n", chunk);
-               }
-       }
-
-       spin_unlock(&lpi_lock);
-
+       WARN_ON(free_lpi_range(base, nr_ids));
        kfree(bitmap);
 }
 
@@ -1543,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
@@ -1824,11 +1906,16 @@ static int its_alloc_tables(struct its_node *its)
 
 static int its_alloc_collections(struct its_node *its)
 {
+       int i;
+
        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
 
+       for (i = 0; i < nr_cpu_ids; i++)
+               its->collections[i].target_address = ~0ULL;
+
        return 0;
 }
 
@@ -1976,12 +2063,12 @@ static void its_cpu_init_collections(void)
 {
        struct its_node *its;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
 
        list_for_each_entry(its, &its_nodes, entry)
                its_cpu_init_collection(its);
 
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2113,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        if (!its_alloc_device_table(its, dev_id))
                return NULL;
 
+       if (WARN_ON(!is_power_of_2(nvecs)))
+               nvecs = roundup_pow_of_two(nvecs);
+
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        /*
-        * We allocate at least one chunk worth of LPIs bet device,
-        * and thus that many ITEs. The device may require less though.
+        * Even if the device wants a single LPI, the ITT must be
+        * sized as a power of two (and you need at least one bit...).
         */
-       nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+       nr_ites = max(2, nvecs);
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        if (alloc_lpis) {
-               lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+               lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
                if (lpi_map)
                        col_map = kcalloc(nr_lpis, sizeof(*col_map),
                                          GFP_KERNEL);
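
Concretely: with the 32-LPI chunks gone, a device wanting a single vector ends up with nr_ites = max(2, 1) = 2 ITT entries instead of a whole chunk, while a non-power-of-two request such as nvecs = 24 trips the WARN_ON and is rounded up to 32 before the ITT is sized.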
@@ -2310,7 +2400,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids) {
+               if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+                       return -EINVAL;
+
+               cpu = cpumask_first(cpu_online_mask);
+       }
+
        its_dev->event_map.col_map[event] = cpu;
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -2351,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        /* If all interrupts have been freed, start mopping the floor */
        if (bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
-               its_lpi_free_chunks(its_dev->event_map.lpi_map,
-                                   its_dev->event_map.lpi_base,
-                                   its_dev->event_map.nr_lpis);
+               its_lpi_free(its_dev->event_map.lpi_map,
+                            its_dev->event_map.lpi_base,
+                            its_dev->event_map.nr_lpis);
                kfree(its_dev->event_map.col_map);
 
                /* Unmap device/itt */
@@ -2752,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
        }
 
        if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
-               its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+               its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
                its_free_prop_table(vm->vprop_page);
        }
 }
@@ -2767,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 
        BUG_ON(!vm);
 
-       bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+       bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
        if (!bitmap)
                return -ENOMEM;
 
        if (nr_ids < nr_irqs) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
        vprop_page = its_allocate_prop_table(GFP_KERNEL);
        if (!vprop_page) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
@@ -2805,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
                if (i > 0)
                        its_vpe_irq_domain_free(domain, virq, i - 1);
 
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                its_free_prop_table(vprop_page);
        }
 
@@ -3042,7 +3139,7 @@ static int its_save_disable(void)
        struct its_node *its;
        int err = 0;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
 
@@ -3074,7 +3171,7 @@ err:
                        writel_relaxed(its->ctlr_save, base + GITS_CTLR);
                }
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return err;
 }
@@ -3084,7 +3181,7 @@ static void its_restore_enable(void)
        struct its_node *its;
        int ret;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
                int i;
@@ -3136,7 +3233,7 @@ static void its_restore_enable(void)
                    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
                        its_cpu_init_collection(its);
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct syscore_ops its_syscore_ops = {
@@ -3370,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
        if (err)
                goto out_free_tables;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_add(&its->entry, &its_nodes);
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return 0;
 
@@ -3399,6 +3496,16 @@ static int redist_disable_lpis(void)
        u64 timeout = USEC_PER_SEC;
        u64 val;
 
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world. Detect this case
+        * by checking the allocation of the pending table for the
+        * current CPU.
+        */
+       if (gic_data_rdist()->pend_page)
+               return 0;
+
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                return -ENXIO;
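
The spin_lock -> raw_spin_lock conversions above matter on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock; its_lock is taken from CPU-hotplug and syscore suspend/resume paths that must not sleep. A minimal sketch of the distinction, using hypothetical locks rather than the ITS code itself:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(hw_lock); /* always spins, even on PREEMPT_RT */
    static DEFINE_SPINLOCK(data_lock);   /* may become a sleeping lock on RT */

    static void touch_hw(void)
    {
            unsigned long flags;

            /* safe from non-preemptible context on any kernel config */
            raw_spin_lock_irqsave(&hw_lock, flags);
            /* ... program hardware registers ... */
            raw_spin_unlock_irqrestore(&hw_lock, flags);
    }

    static void touch_data(void)
    {
            spin_lock(&data_lock);  /* on RT this may sleep */
            /* ... update shared data ... */
            spin_unlock(&data_lock);
    }
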
index 76ea56d779a15825654cdf13573ac263fc14d717..e214181b77b7720568c072c0b0cb07bc81766565 100644 (file)
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
        .flags                  = IRQCHIP_SET_TYPE_MASKED,
 };
 
-#define GIC_ID_NR              (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
-       gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+       gic_data.rdists.gicd_typer = typer;
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
index fc5953dea509af10b38abf783dac4fbd83efdaaa..2ff08986b536195ca97eab893d31a594abc2490b 100644 (file)
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
        return ingenic_intc_of_init(node, 1);
 }
 IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
 
 static int __init intc_2chip_of_init(struct device_node *node,
        struct device_node *parent)
index 1ec3bfe56693ab39831e464048b1959a04250e6d..c671b3212010e6de583e5a5211fc2a20064200f2 100644 (file)
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;
 
-       if (msi_affinity_flag)
-               msg->data |= cpumask_first(data->common->affinity);
+       if (msi_affinity_flag) {
+               const struct cpumask *mask;
+
+               mask = irq_data_get_effective_affinity_mask(data);
+               msg->data |= cpumask_first(mask);
+       }
 
        iommu_dma_map_msi_msg(data->irq, msg);
 }
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
        }
 
-       cpumask_copy(irq_data->common->affinity, mask);
+       irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK;
 }
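
The ls-scfg-msi hunks switch from the user-requested affinity mask to the effective affinity, i.e. the CPU the interrupt was actually steered to, recorded via irq_data_update_effective_affinity(). A reduced sketch of that pattern in a .irq_set_affinity callback (demo_ names are illustrative, not the driver's code):

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static int demo_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask, bool force)
    {
            unsigned int cpu = cpumask_first(mask);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* record the CPU the interrupt was really routed to */
            irq_data_update_effective_affinity(d, cpumask_of(cpu));
            return IRQ_SET_MASK_OK;
    }
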
index 3a7e8905a97e93e42fa9b7646a3ada306c18c4be..3df527fcf4e154b44bed2cc3c65787fef503db6c 100644 (file)
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
 };
 
 static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+       { .exti = 0, .irq_parent = 6 },
        { .exti = 1, .irq_parent = 7 },
        { .exti = 2, .irq_parent = 8 },
        { .exti = 3, .irq_parent = 9 },
index 98f90aadd141b03c42bedd070b66030be7983d86..18c0a1281914fa3218761bd20b2a2e0c85e8aae6 100644 (file)
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
        .getname        = data_sock_getname,
        .sendmsg        = mISDN_sock_sendmsg,
        .recvmsg        = mISDN_sock_recvmsg,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = data_sock_setsockopt,
index 10c08982185a572ff05683461d514e86c8920f96..9c03f35d9df113c6eb6608f4b48b85447635aca9 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig NVM
        bool "Open-Channel SSD target support"
-       depends on BLOCK && HAS_DMA && PCI
+       depends on BLOCK && PCI
        select BLK_DEV_NVME
        help
          Say Y here to enable Open-channel SSDs.
index ab13fcec3fca046c3da6fd621f0e0db9c47b1bf9..75df4c9d8b541de480dfea8d823e0eff389d9ccd 100644 (file)
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
 }
 
 /* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
 {
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
index 938766794c2ef3b6caf538a0fa787447eadb160c..3d0e2c198f0614dbaf22db657a2bfc9336f89ebd 100644 (file)
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return q && blk_queue_dax(q);
+       return bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t))
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
        if (dm_table_supports_dax_write_cache(t))
                dax_write_cache(t->md->dax_dev, true);
 
index 36ef284ad086b881324771d4f882dc6fa96d6dde..72142021b5c9a0410cfb6ccb04a93d613376fb53 100644 (file)
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 static int __commit_transaction(struct dm_pool_metadata *pmd)
 {
        int r;
-       size_t metadata_len, data_len;
        struct thin_disk_superblock *disk_super;
        struct dm_block *sblock;
 
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        if (r < 0)
                return r;
 
-       r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
-       if (r < 0)
-               return r;
-
-       r = dm_sm_root_size(pmd->data_sm, &data_len);
-       if (r < 0)
-               return r;
-
        r = save_sm_roots(pmd);
        if (r < 0)
                return r;
index 7945238df1c0a67a8e525697f0e419c7594ed1ad..b900723bbd0fae4845a17ef67dadcf33dc5cc67b 100644 (file)
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
+static void requeue_bios(struct pool *pool);
+
 static void check_for_space(struct pool *pool)
 {
        int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
        if (r)
                return;
 
-       if (nr_free)
+       if (nr_free) {
                set_pool_mode(pool, PM_WRITE);
+               requeue_bios(pool);
+       }
 }
 
 /*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r) {
-               metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+               if (r == -ENOSPC)
+                       set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+               else
+                       metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
                return r;
        }
 
index 5961c7794ef37008f7a10f521517aded086f20f3..87107c995cb5be3a25b33b10b737ec1ac0dc3fea 100644 (file)
@@ -136,6 +136,7 @@ struct dm_writecache {
        struct dm_target *ti;
        struct dm_dev *dev;
        struct dm_dev *ssd_dev;
+       sector_t start_sector;
        void *memory_map;
        uint64_t memory_map_size;
        size_t metadata_sectors;
@@ -259,7 +260,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        if (da != p) {
                long i;
                wc->memory_map = NULL;
-               pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+               pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        }
 
        dax_read_unlock(id);
+
+       wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+       wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
        return 0;
 err3:
        kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 static void persistent_memory_release(struct dm_writecache *wc)
 {
        if (wc->memory_vmapped)
-               vunmap(wc->memory_map);
+               vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 }
 
 static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
 
 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 {
-       return wc->metadata_sectors +
+       return wc->start_sector + wc->metadata_sectors +
                ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 }
 
@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
                if (unlikely(region.sector + region.count > wc->metadata_sectors))
                        region.count = wc->metadata_sectors - region.sector;
 
+               region.sector += wc->start_sector;
                atomic_inc(&endio.count);
                req.bi_op = REQ_OP_WRITE;
                req.bi_op_flags = REQ_SYNC;
@@ -859,7 +865,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
        if (wc->entries)
                return 0;
-       wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+       wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1487,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
                wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
                wb->page_offset = PAGE_SIZE;
                if (max_pages <= WB_LIST_INLINE ||
-                   unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
-                                                    GFP_NOIO | __GFP_NORETRY |
-                                                    __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+                   unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+                                                          GFP_NOIO | __GFP_NORETRY |
+                                                          __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                        wb->wc_list = wb->wc_list_inline;
                        max_pages = WB_LIST_INLINE;
                }
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
        wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
 
-       if (WC_MODE_PMEM(wc)) {
-               r = persistent_memory_claim(wc);
-               if (r) {
-                       ti->error = "Unable to map persistent memory for cache";
-                       goto bad;
-               }
-       }
-
        /*
         * Parse the cache block size
         */
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        while (opt_params) {
                string = dm_shift_arg(&as), opt_params--;
-               if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+               if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+                       unsigned long long start_sector;
+                       string = dm_shift_arg(&as), opt_params--;
+                       if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+                               goto invalid_optional;
+                       wc->start_sector = start_sector;
+                       if (wc->start_sector != start_sector ||
+                           wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+                               goto invalid_optional;
+               } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
                        string = dm_shift_arg(&as), opt_params--;
                        if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
                                goto invalid_optional;
@@ -2039,12 +2046,20 @@ invalid_optional:
                goto bad;
        }
 
-       if (!WC_MODE_PMEM(wc)) {
+       if (WC_MODE_PMEM(wc)) {
+               r = persistent_memory_claim(wc);
+               if (r) {
+                       ti->error = "Unable to map persistent memory for cache";
+                       goto bad;
+               }
+       } else {
                struct dm_io_region region;
                struct dm_io_request req;
                size_t n_blocks, n_metadata_blocks;
                uint64_t n_bitmap_bits;
 
+               wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
                bio_list_init(&wc->flush_list);
                wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
                if (IS_ERR(wc->flush_thread)) {
@@ -2097,7 +2112,7 @@ invalid_optional:
                }
 
                region.bdev = wc->ssd_dev->bdev;
-               region.sector = 0;
+               region.sector = wc->start_sector;
                region.count = wc->metadata_sectors;
                req.bi_op = REQ_OP_READ;
                req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 
 static struct target_type writecache_target = {
        .name                   = "writecache",
-       .version                = {1, 0, 0},
+       .version                = {1, 1, 0},
        .module                 = THIS_MODULE,
        .ctr                    = writecache_ctr,
        .dtr                    = writecache_dtr,
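
Several of the dm-writecache hunks replace open-coded `n * size` allocations with kvmalloc_array(), kmalloc_array() and array_size(). These helpers saturate to SIZE_MAX on multiplication overflow, so an oversized count makes the allocation fail instead of silently producing a short buffer. A minimal sketch, independent of the driver:

    #include <linux/overflow.h>
    #include <linux/vmalloc.h>

    struct item;    /* the element type is irrelevant to the pattern */

    static void *alloc_items(size_t n)
    {
            /* array_size() returns SIZE_MAX on overflow -> vmalloc() fails */
            return vmalloc(array_size(n, sizeof(struct item *)));
    }
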
index 3c0e45f4dcf5cdf06d79b0c9d107d7455a0b6ad7..a44183ff4be0a3bd4219a7bf5854622aeca79db2 100644 (file)
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
-       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
index e65429a29c06e2554e8a0e23ba5a0b2a3b18a8c8..b0dd7027848b7de9f701469c6eb29b5d9c96e1df 100644 (file)
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
-       if (ti->type->direct_access)
-               ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+       ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
        dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                                 * the usage of io->orig_bio in dm_remap_zone_report()
                                 * won't be affected by this reassignment.
                                 */
-                               struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-                                                                &md->queue->bio_split);
+                               struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+                                                         GFP_NOIO, &md->queue->bio_split);
                                ci.io->orig_bio = b;
-                               bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
                                bio_chain(b, bio);
                                ret = generic_make_request(bio);
                                break;
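
The __split_and_process_bio() change replaces clone-plus-bio_advance() with bio_split(), which returns the first `sectors` of a bio and leaves the remainder in the original; bio_chain() then ties the two together so the tail does not complete before the split-off head. A rough sketch of the pattern (the helper name is made up):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct bio *split_off_head(struct bio *bio, unsigned int sectors,
                                      struct bio_set *bs)
    {
            struct bio *head = bio_split(bio, sectors, GFP_NOIO, bs);

            /* bio (the tail) now completes only after head does */
            bio_chain(head, bio);
            generic_make_request(bio);      /* resubmit the remainder */
            return head;
    }
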
index 29b0cd9ec951ee4603279656e7148ba2a5b8d763..994aed2f9dfff4135170102265523045e893ac0a 100644 (file)
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
index 478cf446827f469c1d02d6f2918fcb8dd870f893..35bd3a62451b30fec0cca41fcdc687bb7920aa56 100644 (file)
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
                            disk->rdev->saved_raid_disk < 0)
                                conf->fullsync = 1;
                }
+
+               if (disk->replacement &&
+                   !test_bit(In_sync, &disk->replacement->flags) &&
+                   disk->replacement->saved_raid_disk < 0) {
+                       conf->fullsync = 1;
+               }
+
                disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
index edb35a5c57ea0c1b7f04a8947c2761ae7994372f..a99fc0ced7a7a0a986968a03622798edfdaa6b16 100644 (file)
@@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
  */
 void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
 {
-       struct vsp1_device *vsp1 = dev_get_drvdata(dev);
-
-       mutex_lock(&vsp1->drm->lock);
 }
 EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
 
@@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
 
        drm_pipe->crc = cfg->crc;
 
+       mutex_lock(&vsp1->drm->lock);
        vsp1_du_pipeline_setup_inputs(vsp1, pipe);
        vsp1_du_pipeline_configure(pipe);
        mutex_unlock(&vsp1->drm->lock);
index 40826bba06b6d52c06bef7eb64bea6a719496dd0..81b150e5dfdb9422a997ae38b5f27f57f1f474e2 100644 (file)
@@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
 
        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);
+       bpf_prog_put(prog);
 unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
@@ -207,29 +208,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
        bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-       struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;
 
        if (attr->attach_flags)
                return -EINVAL;
 
-       prog = bpf_prog_get_type(attr->attach_bpf_fd,
-                                BPF_PROG_TYPE_LIRC_MODE2);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        rcdev = rc_dev_get_from_fd(attr->target_fd);
-       if (IS_ERR(rcdev)) {
-               bpf_prog_put(prog);
+       if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);
-       }
 
        ret = lirc_bpf_attach(rcdev, prog);
-       if (ret)
-               bpf_prog_put(prog);
 
        put_device(&rcdev->dev);
 
index 2e0066b1a31ce31cd46d11573fefbc65ccb89079..e7948908e78c80194cf40b194054afcfb13df46a 100644 (file)
@@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data)
                while (kfifo_out(&raw->kfifo, &ev, 1)) {
                        if (is_timing_event(ev)) {
                                if (ev.duration == 0)
-                                       dev_err(&dev->dev, "nonsensical timing event of duration 0");
+                                       dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
                                if (is_timing_event(raw->prev_ev) &&
                                    !is_transition(&ev, &raw->prev_ev))
-                                       dev_err(&dev->dev, "two consecutive events of type %s",
-                                               TO_STR(ev.pulse));
+                                       dev_warn_once(&dev->dev, "two consecutive events of type %s",
+                                                     TO_STR(ev.pulse));
                                if (raw->prev_ev.reset && ev.pulse == 0)
-                                       dev_err(&dev->dev, "timing event after reset should be pulse");
+                                       dev_warn_once(&dev->dev, "timing event after reset should be pulse");
                        }
                        list_for_each_entry(handler, &ir_raw_handler_list, list)
                                if (dev->enabled_protocols &
index 2e222d9ee01f584f0e858953753803d8fcde7ff4..ca68e1d2b2f989cf366bdecc999db5e388d50e11 100644 (file)
@@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t)
        spin_unlock_irqrestore(&dev->keylock, flags);
 }
 
+static unsigned int repeat_period(int protocol)
+{
+       if (protocol >= ARRAY_SIZE(protocols))
+               return 100;
+
+       return protocols[protocol].repeat_period;
+}
+
 /**
  * rc_repeat() - signals that a key is still pressed
  * @dev:       the struct rc_dev descriptor of the device
@@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev)
 {
        unsigned long flags;
        unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
-               msecs_to_jiffies(protocols[dev->last_protocol].repeat_period);
+               msecs_to_jiffies(repeat_period(dev->last_protocol));
        struct lirc_scancode sc = {
                .scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
                .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
@@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
 
        if (dev->keypressed) {
                dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
-                       msecs_to_jiffies(protocols[protocol].repeat_period);
+                       msecs_to_jiffies(repeat_period(protocol));
                mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
        }
        spin_unlock_irqrestore(&dev->keylock, flags);
index 753b1a698fc4e3ef74a7ebb718a1997ae8a13a4d..8fd5ec4d60423e453b7ff5ca4319079097fda3a8 100644 (file)
@@ -67,10 +67,8 @@ static struct file *cxl_getfile(const char *name,
                                const struct file_operations *fops,
                                void *priv, int flags)
 {
-       struct qstr this;
-       struct path path;
        struct file *file;
-       struct inode *inode = NULL;
+       struct inode *inode;
        int rc;
 
        /* strongly inspired by anon_inode_getfile() */
@@ -91,27 +89,15 @@ static struct file *cxl_getfile(const char *name,
                goto err_fs;
        }
 
-       file = ERR_PTR(-ENOMEM);
-       this.name = name;
-       this.len = strlen(name);
-       this.hash = 0;
-       path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
-       if (!path.dentry)
+       file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
+                                flags & (O_ACCMODE | O_NONBLOCK), fops);
+       if (IS_ERR(file))
                goto err_inode;
 
-       path.mnt = mntget(cxl_vfs_mount);
-       d_instantiate(path.dentry, inode);
-
-       file = alloc_file(&path, OPEN_FMODE(flags), fops);
-       if (IS_ERR(file))
-               goto err_dput;
-       file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = priv;
 
        return file;
 
-err_dput:
-       path_put(&path);
 err_inode:
        iput(inode);
 err_fs:
index e05c3245930a1e3f94aad4c0a7f85e015759ec76..fa840666bdd1aeb20cca67fc5df9556fc73135aa 100644 (file)
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        void __iomem *address = (void __iomem *)file->private_data;
-       unsigned char *page;
-       int retval;
        int len = 0;
        unsigned int value;
-
-       if (*offset < 0)
-               return -EINVAL;
-       if (count == 0 || count > 1024)
-               return 0;
-       if (*offset != 0)
-               return 0;
-
-       page = (unsigned char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
+       char lbuf[20];
 
        value = readl(address);
-       len = sprintf(page, "%d\n", value);
-
-       if (copy_to_user(buf, page, len)) {
-               retval = -EFAULT;
-               goto exit;
-       }
-       *offset += len;
-       retval = len;
+       len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
 
-exit:
-       free_page((unsigned long)page);
-       return retval;
+       return simple_read_from_buffer(buf, count, offset, lbuf, len);
 }
 
 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
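
The remote_settings_file_read() rewrite drops the manual offset validation, page allocation and copy_to_user() in favour of simple_read_from_buffer(), which clamps the copy to the buffer, handles partial reads and advances *ppos itself. A minimal read handler in the same style (the value is a placeholder):

    #include <linux/fs.h>
    #include <linux/kernel.h>

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            char tmp[20];
            int len = scnprintf(tmp, sizeof(tmp), "%d\n", 42);

            /* copies at most count bytes from tmp + *ppos, updates *ppos */
            return simple_read_from_buffer(buf, count, ppos, tmp, len);
    }
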
index b0b8f18a85e3e132d7741e9daab53fe9270b1b8a..6649f0d56d2f0cdee1dde6d57ad68141ccce12bf 100644 (file)
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
        if (&cl->link == &dev->file_list) {
                /* A message for not connected fixed address clients
                 * should be silently discarded
+                * On power down, clients may be force-cleaned;
+                * silently discard such messages as well.
                 */
-               if (hdr_is_fixed(mei_hdr)) {
+               if (hdr_is_fixed(mei_hdr) ||
+                   dev->dev_state == MEI_DEV_POWER_DOWN) {
                        mei_irq_discard_msg(dev, mei_hdr);
                        ret = 0;
                        goto reset_slots;
index efd733472a3531804225c5515ade4f4cf69fd707..56c6f79a5c5af83a862cf76352f172f5e02ee0b8 100644 (file)
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.lock[is_2m_pages]);
 
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.unlock[is_2m_pages]);
 
index ef05e00393782d16d77f2f1ecf16803f69461217..2a833686784b6b459d9744b366ef22cb5ca1279c 100644 (file)
@@ -27,8 +27,8 @@ struct mmc_gpio {
        bool override_cd_active_level;
        irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
        char *ro_label;
-       char cd_label[0];
        u32 cd_debounce_delay_ms;
+       char cd_label[];
 };
 
 static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
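
The slot-gpio hunk converts the old GNU zero-length array `cd_label[0]` into a C99 flexible array member and moves it behind the fixed-size fields, where a flexible member must live. A sketch of how such a structure is typically sized and filled (names invented for the example):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct labelled {
            u32 value;
            char label[];           /* flexible array member: must be last */
    };

    static struct labelled *labelled_alloc(const char *name)
    {
            /* struct_size() adds the trailing array, overflow-safely */
            struct labelled *l = kzalloc(struct_size(l, label,
                                                     strlen(name) + 1),
                                         GFP_KERNEL);

            if (l)
                    strcpy(l->label, name);
            return l;
    }
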
index 623f4d27fa0161b1a938521c266176f544f2ff22..80dc2fd6576cf3f88afd695ad1f36ec1b4f52b41 100644 (file)
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
         * It's used when HS400 mode is enabled.
         */
        if (data->flags & MMC_DATA_WRITE &&
-               !(host->timing != MMC_TIMING_MMC_HS400))
-               return;
+               host->timing != MMC_TIMING_MMC_HS400)
+               goto disable;
 
        if (data->flags & MMC_DATA_WRITE)
                enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
                enable = SDMMC_CARD_RD_THR_EN;
 
        if (host->timing != MMC_TIMING_MMC_HS200 &&
-           host->timing != MMC_TIMING_UHS_SDR104)
+           host->timing != MMC_TIMING_UHS_SDR104 &&
+           host->timing != MMC_TIMING_MMC_HS400)
                goto disable;
 
        blksz_depth = blksz / (1 << host->data_shift);
index 75f781c11e897ff8e47ed92fb7995ba33f706cd0..de4e6e5bf304468123d657d8792a797690d666e6 100644 (file)
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
        int i;
 
        for_each_sg(data->sg, sg, data->sg_len, i) {
-               void *buf = kmap_atomic(sg_page(sg) + sg->offset;
+               void *buf = kmap_atomic(sg_page(sg) + sg->offset);
                buffer_swap32(buf, sg->length);
                kunmap_atomic(buf);
+       }
 }
 #else
 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
index f7f9773d161f1e5e2f58ae1f9a32c800ac5f3474..d032bd63444d10295826750b80a560c0335ddda5 100644 (file)
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
        renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
                                            RST_RESERVED_BITS | val);
 
-       if (host->data && host->data->flags & MMC_DATA_READ)
-               clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+       clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
 
        renesas_sdhi_internal_dmac_enable_dma(host, true);
 }
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
                goto force_pio;
 
        /* This DMAC cannot handle if buffer is not 8-bytes alignment */
-       if (!IS_ALIGNED(sg_dma_address(sg), 8)) {
-               dma_unmap_sg(&host->pdev->dev, sg, host->sg_len,
-                            mmc_get_dma_dir(data));
-               goto force_pio;
-       }
+       if (!IS_ALIGNED(sg_dma_address(sg), 8))
+               goto force_pio_with_unmap;
 
        if (data->flags & MMC_DATA_READ) {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
                if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
                    test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
-                       goto force_pio;
+                       goto force_pio_with_unmap;
        } else {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
        }
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 
        return;
 
+force_pio_with_unmap:
+       dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+
 force_pio:
        host->force_pio = true;
        renesas_sdhi_internal_dmac_enable_dma(host, false);
index d6aef70d34fac0554d223bed4121e173dea1281d..4eb3d29ecde1078512f85d291904a63d5fa358c6 100644 (file)
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 
                        if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
                                val |= SDHCI_SUPPORT_HS400;
+
+                       /*
+                        * Do not advertise faster UHS modes if there are no
+                        * pinctrl states for 100MHz/200MHz.
+                        */
+                       if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
+                           IS_ERR_OR_NULL(imx_data->pins_200mhz))
+                               val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
+                                        | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
                }
        }
 
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                                                ESDHC_PINCTRL_STATE_100MHZ);
                imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
                                                ESDHC_PINCTRL_STATE_200MHZ);
-               if (IS_ERR(imx_data->pins_100mhz) ||
-                               IS_ERR(imx_data->pins_200mhz)) {
-                       dev_warn(mmc_dev(host->mmc),
-                               "could not get ultra high speed state, work on normal mode\n");
-                       /*
-                        * fall back to not supporting uhs by specifying no
-                        * 1.8v quirk
-                        */
-                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-               }
-       } else {
-               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
        }
 
        /* call to generic mmc_of_parse to support additional capabilities */
index e7472590f2ed6416b800ef85c2efeae574cc6718..8e7f3e35ee3dc48eef93c126f2d161ad63b3dc12 100644 (file)
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
        sunxi_mmc_init_host(host);
        sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
        sunxi_mmc_set_clk(host, &mmc->ios);
+       enable_irq(host->irq);
 
        return 0;
 }
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct sunxi_mmc_host *host = mmc_priv(mmc);
 
+       /*
+        * When the clocks are off, spurious interrupts may be
+        * received, stalling the system. Disabling the irq
+        * prevents this.
+        */
+       disable_irq(host->irq);
        sunxi_mmc_reset_host(host);
        sunxi_mmc_disable(host);
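
disable_irq() in the runtime-suspend path above not only masks the interrupt but also waits for any handler already running to finish, so no ISR can touch the controller once its clocks are gated; the matching enable_irq() in the resume path restores the depth count. The pairing in isolation (the callbacks are schematic):

    #include <linux/interrupt.h>

    static void demo_runtime_suspend(int irq)
    {
            disable_irq(irq);       /* mask and wait for running handlers */
            /* ... gate clocks, power the block down ... */
    }

    static void demo_runtime_resume(int irq)
    {
            /* ... ungate clocks, reinitialize the block ... */
            enable_irq(irq);        /* must balance disable_irq() */
    }
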
 
index a0c655628d6d5283fd9b5cd8234b8b09857d9c75..1b64ac8c5bc86309061a5540d385eb6e2ca866cc 100644 (file)
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 
 struct ppb_lock {
        struct flchip *chip;
-       loff_t offset;
+       unsigned long adr;
        int locked;
 };
 
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        unsigned long timeo;
        int ret;
 
+       adr += chip->start;
        mutex_lock(&chip->mutex);
-       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
 
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                chip->state = FL_LOCKING;
-               map_write(map, CMD(0xA0), chip->start + adr);
-               map_write(map, CMD(0x00), chip->start + adr);
+               map_write(map, CMD(0xA0), adr);
+               map_write(map, CMD(0x00), adr);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /*
                 * Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        map_write(map, CMD(0x00), chip->start);
 
        chip->state = FL_READY;
-       put_chip(map, chip, adr + chip->start);
+       put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
 
        return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                 * sectors shall be unlocked, so let's keep their locking
                 * status at "unlocked" (locked=0) for the final re-locking.
                 */
-               if ((adr < ofs) || (adr >= (ofs + len))) {
+               if ((offset < ofs) || (offset >= (ofs + len))) {
                        sect[sectors].chip = &cfi->chips[chipnum];
-                       sect[sectors].offset = offset;
+                       sect[sectors].adr = adr;
                        sect[sectors].locked = do_ppb_xxlock(
                                map, &cfi->chips[chipnum], adr, 0,
                                DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                        i++;
 
                if (adr >> cfi->chipshift) {
+                       if (offset >= (ofs + len))
+                               break;
                        adr = 0;
                        chipnum++;
 
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
         */
        for (i = 0; i < sectors; i++) {
                if (sect[i].locked)
-                       do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+                       do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
                                      DO_XXLOCK_ONEBLOCK_LOCK);
        }
 
index 3a6f450d1093c4c59d8b79bfafbc0d0c8c744722..53febe8a68c3cdfadfad784bca3335879a86d1f8 100644 (file)
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
        { "AT45DB642x",  0x1f2800, 8192, 1056, 11, SUP_POW2PS},
        { "at45db642d",  0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
 
-       { "AT45DB641E",  0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
-       { "at45db641e",  0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+       { "AT45DB641E",  0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+       { "at45db641e",  0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
 };
 
 static struct flash_info *jedec_lookup(struct spi_device *spi,
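
The ULL suffixes added above matter because 0x1f28000100 needs 37 bits: on a 32-bit build the constant does not fit in unsigned long, and some compilers warn about the over-wide literal unless the 64-bit type is made explicit. A self-contained illustration (the identifier is invented):

    #include <linux/types.h>

    /* 37-bit JEDEC id: suffix it so 32-bit builds stay warning-free */
    #define DEMO_JEDEC_ID   0x1f28000100ULL

    static const u64 demo_ids[] = { DEMO_JEDEC_ID };
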
index cfd33e6ca77f903a6afc636e73f31ffb40d0d0bd..5869e90cc14b3c1f367b17a4f58bdb31c1e188ea 100644 (file)
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       denali->clk_x_rate = clk_get_rate(dt->clk);
+       /*
+        * Hardcode the clock rate for backward compatibility.
+        * This works for both SOCFPGA and UniPhier.
+        */
+       denali->clk_x_rate = 200000000;
 
        ret = denali_init(denali);
        if (ret)
index 45786e707b7bd1ae5a4cba72825bcfb00fdda370..26cef218bb43ee1bd1fb2cf1dbadeb8ad38e2f8e 100644 (file)
@@ -48,7 +48,7 @@
 #define NFC_V1_V2_CONFIG               (host->regs + 0x0a)
 #define NFC_V1_V2_ECC_STATUS_RESULT    (host->regs + 0x0c)
 #define NFC_V1_V2_RSLTMAIN_AREA                (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA       (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA         (host->regs + 0x10)
 #define NFC_V1_V2_WRPROT               (host->regs + 0x12)
 #define NFC_V1_UNLOCKSTART_BLKADDR     (host->regs + 0x14)
 #define NFC_V1_UNLOCKEND_BLKADDR       (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
        writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
+       /* spare area size in 16-bit half-words */
+       writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
        /* Unlock the internal RAM Buffer */
        writew(0x2, NFC_V1_V2_CONFIG);
 
index 10c4f9919850c3e7b56ed6bdb083a0fc35a0b7f5..b01d15ec4c56bfbdded578526d76e2ed12b65093 100644 (file)
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
 
        for (; page < page_end; page++) {
                res = chip->ecc.read_oob(mtd, chip, page);
-               if (res)
+               if (res < 0)
                        return res;
 
                bad = chip->oob_poi[chip->badblockpos];
index 7ed1f87e742a7accbbeb441b02e199878b3ef035..49c546c97c6f9a370ff64636deaf778bce2c3ce0 100644 (file)
 
 #include <linux/mtd/rawnand.h>
 
+/*
+ * Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings, unlike what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+       unsigned int i;
+       static const char * const broken_get_timings[] = {
+               "MX30LF1G18AC",
+               "MX30LF1G28AC",
+               "MX30LF2G18AC",
+               "MX30LF2G28AC",
+               "MX30LF4G18AC",
+               "MX30LF4G28AC",
+               "MX60LF8G18AC",
+       };
+
+       if (!chip->parameters.supports_set_get_features)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+               if (!strcmp(broken_get_timings[i], chip->parameters.model))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(broken_get_timings))
+               return;
+
+       bitmap_clear(chip->parameters.get_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+       bitmap_clear(chip->parameters.set_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
 static int macronix_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 
-       /*
-        * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
-        * the timings unlike what is declared in the parameter page. Unflag
-        * this feature to avoid unnecessary downturns.
-        */
-       if (chip->parameters.supports_set_get_features &&
-           !strcmp("MX30LF2G18AC", chip->parameters.model)) {
-               bitmap_clear(chip->parameters.get_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-               bitmap_clear(chip->parameters.set_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-       }
+       macronix_nand_fix_broken_get_timings(chip);
 
        return 0;
 }
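
The nand_macronix refactor generalizes one hard-coded strcmp() into a table walk; the `i == ARRAY_SIZE(...)` test after the loop is the idiomatic not-found check. The bare pattern, with invented table contents:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static bool model_is_quirky(const char *model)
    {
            static const char * const quirky[] = {
                    "CHIP-A", "CHIP-B", "CHIP-C",
            };
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(quirky); i++)
                    if (!strcmp(quirky[i], model))
                            return true;

            return false;   /* fell off the end: not in the table */
    }
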
index 0af45b134c0cf859902f3d138b305bf5836d526d..5ec4c90a637d549a644441461ffc7717c623a4bc 100644 (file)
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
 
        if (p->supports_set_get_features) {
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
        }
 
        return 0;
index c3f7aaa5d18f7de068f797b84b05b31be4248897..d7e10b36a0b94b476fb5068d634c58ee756fbc2c 100644 (file)
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
        if (ret)
                return ret;
 
-       if (f_pdata->use_direct_mode)
+       if (f_pdata->use_direct_mode) {
                memcpy_toio(cqspi->ahb_base + to, buf, len);
-       else
+               ret = cqspi_wait_idle(cqspi);
+       } else {
                ret = cqspi_indirect_write_execute(nor, to, buf, len);
+       }
        if (ret)
                return ret;
 
index 63e3844c5becf5e973e10fa2aa533f668ac8e30b..217b790d22edc2fdd90d4f3c634cb2fa38bc095f 100644 (file)
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                goto err_upper_unlink;
        }
 
+       bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+
        /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
        if (bond_mode_can_use_xmit_hash(bond))
                bond_update_slave_arr(bond, NULL);
 
-       bond->nest_level = dev_get_nest_level(bond_dev);
 
        netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
                    slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
+static int bond_get_nest_level(struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+
+       return bond->nest_level;
+}
+
 static void bond_get_stats(struct net_device *bond_dev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
        struct list_head *iter;
        struct slave *slave;
 
-       spin_lock(&bond->stats_lock);
+       spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
        memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
        rcu_read_lock();
@@ -4227,6 +4235,7 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
+       .ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
@@ -4725,6 +4734,7 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
+       bond->nest_level = SINGLE_DEPTH_NESTING;
        netdev_lockdep_set_classes(bond_dev);
 
        list_add_tail(&bond->bond_list, &bn->dev_list);
index 98663c50ded0b4b3f784be23baa8fa0e19b419ee..4d5d01cb8141b89b18cf9d7affd8049a42c19e3b 100644 (file)
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
 static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval)
 {
-       if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-               netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
-                          newval->string);
-               /* disable arp monitoring */
-               bond->params.arp_interval = 0;
-               /* set miimon to default value */
-               bond->params.miimon = BOND_DEFAULT_MIIMON;
-               netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
-                          bond->params.miimon);
+       if (!bond_mode_uses_arp(newval->value)) {
+               if (bond->params.arp_interval) {
+                       netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+                                  newval->string);
+                       /* disable arp monitoring */
+                       bond->params.arp_interval = 0;
+               }
+
+               if (!bond->params.miimon) {
+                       /* set miimon to default value */
+                       bond->params.miimon = BOND_DEFAULT_MIIMON;
+                       netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+                                  bond->params.miimon);
+               }
        }
 
        if (newval->value == BOND_MODE_ALB)
index b397a33f3d32b5e3c28398a660c736d45a74179d..9b449400376bc536cd0d53ea4abfe13d14515ba6 100644 (file)
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
        int err;
 
        err = pm_runtime_get_sync(priv->device);
-       if (err)
+       if (err < 0) {
                pm_runtime_put_noidle(priv->device);
+               return err;
+       }
 
-       return err;
+       return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 
        } else {
        /* Version 3.1.x or 3.2.x */
-               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+                         CCCR_NISO);
 
                /* Only 3.2.x has NISO Bit implemented */
                if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
        priv->can.clock.freq = clk_get_rate(cclk);
        priv->mram_base = mram_addr;
 
-       m_can_of_parse_mram(priv, mram_config_vals);
-
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       m_can_of_parse_mram(priv, mram_config_vals);
+
        devm_can_led_init(dev);
 
        of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
        return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode  */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       m_can_init_ram(priv);
-
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
                if (ret)
                        return ret;
 
+               m_can_init_ram(priv);
                m_can_start(ndev);
                netif_device_attach(ndev);
                netif_start_queue(ndev);
index c7427bdd3a4bff957aaef3fdb1d2f8ed0ead41cb..2949a381a94dceb2674f150ad5feaf580a201d25 100644 (file)
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
                return 0;
        }
        cdm = of_iomap(np_cdm, 0);
+       if (!cdm) {
+               of_node_put(np_cdm);
+               dev_err(&ofdev->dev, "can't map clock node!\n");
+               return 0;
+       }
 
        if (in_8(&cdm->ipb_clk_sel) & 0x1)
                freq *= 2;
index b9e28578bc7bd7463485c316020af6e9cdaf9d02..455a3797a20065d264a837dcc89e91453a2a93ba 100644 (file)
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1            0x0040  /* version reg #1 */
 #define PCIEFD_REG_SYS_VER2            0x0044  /* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z)     (((u32)(x) << 24) | \
+                                        ((u32)(y) << 16) | \
+                                        ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST          0x00000001      /* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN          0x00000002      /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
                 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
                 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* DMA logic in firmware < v3.3.0 doesn't correctly handle a mix of
+        * 32-bit and 64-bit logical addresses: this workaround forces use of
+        * 32-bit DMA addresses whenever such firmware is detected.
+        */
+       if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+           PCIEFD_FW_VERSION(3, 3, 0)) {
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       dev_warn(&pdev->dev,
+                                "warning: can't set DMA mask %llxh (err %d)\n",
+                                DMA_BIT_MASK(32), err);
+       }
+#endif
+
        /* stop system clock */
        pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
                            PCIEFD_REG_SYS_CTL_CLR);
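
PCIEFD_FW_VERSION() packs major/minor/sub into one u32 with the major number in the top byte, so firmware versions compare with a plain integer `<`: v3.2.9 encodes as 0x03020900, which is below v3.3.0's 0x03030000 and therefore takes the 32-bit DMA fallback. The gating step reduced to its essentials (the device pointer is assumed available):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    #define FW_VERSION(x, y, z) \
            (((u32)(x) << 24) | ((u32)(y) << 16) | ((u32)(z) << 8))

    static void demo_limit_dma(struct device *dev, u8 maj, u8 mnr, u8 sub)
    {
            if (FW_VERSION(maj, mnr, sub) < FW_VERSION(3, 3, 0) &&
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    dev_warn(dev, "can't set 32-bit DMA mask\n");
    }
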
index 12ff0020ecd60904b65b89a633cd2a60bf880ed2..b7dfd4109d24ef3db5c03d78fb078f7f7cf6e3ab 100644 (file)
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
                usb_free_urb(dev->intr_urb);
 
                kfree(dev->intr_in_buffer);
+               kfree(dev->tx_msg_buffer);
        }
 }
 
index 89aec07c225f58d26a80ce4795afbaa6c19d9d84..5a24039733efd23255142c4abc0d2b758d188554 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014 Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL          (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
                                 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
                                 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-                                XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+                                XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT             7  /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
  * struct xcan_priv - This definition defines a CAN driver instance
  * @can:                       CAN private data structure.
+ * @tx_lock:                   Lock for synchronizing TX interrupt handling
  * @tx_head:                   Tx CAN packets ready to send on the queue
  * @tx_tail:                   Tx CAN packets successfully sent on the queue
  * @tx_max:                    Maximum number of packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
        struct can_priv can;
+       spinlock_t tx_lock;
        unsigned int tx_head;
        unsigned int tx_tail;
        unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
        .brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK     0x0001
+struct xcan_devtype_data {
+       unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:      Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
                usleep_range(500, 10000);
        }
 
+       /* reset clears FIFOs */
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+
        return 0;
 }
 
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 id, dlc, data[2] = {0, 0};
+       unsigned long flags;
 
        if (can_dropped_invalid_skb(ndev, skb))
                return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
        can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
        priv->tx_head++;
 
        /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                stats->tx_bytes += cf->can_dlc;
        }
 
+       /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+       if (priv->tx_max > 1)
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
        /* Check if the TX buffer is full */
        if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
                netif_stop_queue(ndev);
 
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
        return NETDEV_TX_OK;
 }
 
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
        return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:      Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+       if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+               return CAN_STATE_ERROR_PASSIVE;
+       else if (status & XCAN_SR_ERRWRN_MASK)
+               return CAN_STATE_ERROR_WARNING;
+       else
+               return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:      Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf:                Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+                                enum can_state new_state,
+                                struct can_frame *cf)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+       u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+       u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+       priv->can.state = new_state;
+
+       if (cf) {
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[6] = txerr;
+               cf->data[7] = rxerr;
+       }
+
+       switch (new_state) {
+       case CAN_STATE_ERROR_PASSIVE:
+               priv->can.can_stats.error_passive++;
+               if (cf)
+                       cf->data[1] = (rxerr > 127) ?
+                                       CAN_ERR_CRTL_RX_PASSIVE :
+                                       CAN_ERR_CRTL_TX_PASSIVE;
+               break;
+       case CAN_STATE_ERROR_WARNING:
+               priv->can.can_stats.error_warning++;
+               if (cf)
+                       cf->data[1] |= (txerr > rxerr) ?
+                                       CAN_ERR_CRTL_TX_WARNING :
+                                       CAN_ERR_CRTL_RX_WARNING;
+               break;
+       case CAN_STATE_ERROR_ACTIVE:
+               if (cf)
+                       cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+               break;
+       default:
+               /* non-ERROR states are handled elsewhere */
+               WARN_ON(1);
+               break;
+       }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:      Pointer to net_device structure
+ *
+ * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       enum can_state old_state = priv->can.state;
+       enum can_state new_state;
+
+       /* changing error state due to successful frame RX/TX can only
+        * occur from these states
+        */
+       if (old_state != CAN_STATE_ERROR_WARNING &&
+           old_state != CAN_STATE_ERROR_PASSIVE)
+               return;
+
+       new_state = xcan_current_error_state(ndev);
+
+       if (new_state != old_state) {
+               struct sk_buff *skb;
+               struct can_frame *cf;
+
+               skb = alloc_can_err_skb(ndev, &cf);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+               if (skb) {
+                       struct net_device_stats *stats = &ndev->stats;
+
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+                       netif_rx(skb);
+               }
+       }
+}
+
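
The precedence encoded in xcan_current_error_state() can be restated as a tiny runnable table. The mask values below are placeholders for illustration, not the hardware register layout:

#include <stdio.h>

#define SR_ESTAT_MASK	0x180u	/* placeholder, not the real XCAN value */
#define SR_ERRWRN_MASK	0x040u	/* placeholder, not the real XCAN value */

static const char *decode_state(unsigned int status)
{
	if ((status & SR_ESTAT_MASK) == SR_ESTAT_MASK)
		return "error-passive";	/* both ESTAT bits set */
	if (status & SR_ERRWRN_MASK)
		return "error-warning";
	return "error-active";
}

int main(void)
{
	printf("%s\n", decode_state(SR_ESTAT_MASK | SR_ERRWRN_MASK));
	printf("%s\n", decode_state(SR_ERRWRN_MASK));
	printf("%s\n", decode_state(0));
	return 0;
}

The ordering matters: the more severe state is tested first, so it takes precedence when both conditions report.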
 /**
  * xcan_err_interrupt - error frame Isr
  * @ndev:      net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
-       u32 err_status, status, txerr = 0, rxerr = 0;
+       u32 err_status;
 
        skb = alloc_can_err_skb(ndev, &cf);
 
        err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
        priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-       txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-       rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-                       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
        if (isr & XCAN_IXR_BSOFF_MASK) {
                priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
                can_bus_off(ndev);
                if (skb)
                        cf->can_id |= CAN_ERR_BUSOFF;
-       } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-               priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               priv->can.can_stats.error_passive++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (rxerr > 127) ?
-                                       CAN_ERR_CRTL_RX_PASSIVE :
-                                       CAN_ERR_CRTL_TX_PASSIVE;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
-       } else if (status & XCAN_SR_ERRWRN_MASK) {
-               priv->can.state = CAN_STATE_ERROR_WARNING;
-               priv->can.can_stats.error_warning++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] |= (txerr > rxerr) ?
-                                       CAN_ERR_CRTL_TX_WARNING :
-                                       CAN_ERR_CRTL_RX_WARNING;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
+       } else {
+               enum can_state new_state = xcan_current_error_state(ndev);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
        }
 
        /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        if (isr & XCAN_IXR_RXOFLW_MASK) {
                stats->rx_over_errors++;
                stats->rx_errors++;
-               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
                if (skb) {
                        cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-               if (isr & XCAN_IXR_RXOK_MASK) {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXOK_MASK);
-                       work_done += xcan_rx(ndev);
-               } else {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXNEMP_MASK);
-                       break;
-               }
+               work_done += xcan_rx(ndev);
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
                isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
 
-       if (work_done)
+       if (work_done) {
                can_led_event(ndev, CAN_LED_EVENT_RX);
+               xcan_update_error_state_after_rxtx(ndev);
+       }
 
        if (work_done < quota) {
                napi_complete_done(napi, work_done);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+               ier |= XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
        }
        return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
+       unsigned int frames_in_fifo;
+       int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+       unsigned long flags;
+       int retries = 0;
+
+       /* Synchronize with xmit as we need to know the exact number
+        * of frames in the FIFO to stay in sync due to the TXFEMP
+        * handling.
+        * This also prevents a race between netif_wake_queue() and
+        * netif_stop_queue().
+        */
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
+       frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+       if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+               /* clear TXOK anyway to avoid getting back here */
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               return;
+       }
+
+       /* Check if 2 frames were sent (TXOK only means that at least 1
+        * frame was sent).
+        */
+       if (frames_in_fifo > 1) {
+               WARN_ON(frames_in_fifo > priv->tx_max);
+
+               /* Synchronize TXOK and isr so that after the loop:
+                * (1) isr variable is up-to-date at least up to TXOK clear
+                *     time. This avoids us clearing a TXOK of a second frame
+                *     but not noticing that the FIFO is now empty and thus
+                *     marking only a single frame as sent.
+                * (2) No TXOK is left. Having one could mean leaving a
+                *     stray TXOK as we might process the associated frame
+                *     via TXFEMP handling as we read TXFEMP *after* TXOK
+                *     clear to satisfy (1).
+                */
+               while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+                       priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+                       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+               }
 
-       while ((priv->tx_head - priv->tx_tail > 0) &&
-                       (isr & XCAN_IXR_TXOK_MASK)) {
+               if (isr & XCAN_IXR_TXFEMP_MASK) {
+                       /* nothing in FIFO anymore */
+                       frames_sent = frames_in_fifo;
+               }
+       } else {
+               /* single frame in FIFO, just clear TXOK */
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+       }
+
+       while (frames_sent--) {
                can_get_echo_skb(ndev, priv->tx_tail %
                                        priv->tx_max);
                priv->tx_tail++;
                stats->tx_packets++;
-               isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
-       can_led_event(ndev, CAN_LED_EVENT_TX);
+
        netif_wake_queue(ndev);
+
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+       xcan_update_error_state_after_rxtx(ndev);
 }
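
The branchy middle of this handler reduces to one decision: TXOK only guarantees that at least one frame completed, and with two frames queued the TXFEMP (TX FIFO empty) flag, sampled after TXOK is cleared, tells whether both did. A stand-alone restatement of that decision (illustrative, not the driver code):

#include <stdio.h>

/* txok/txfemp are the interrupt flags sampled as described above */
static unsigned int frames_completed(unsigned int frames_in_fifo,
				     int txok, int txfemp)
{
	if (!txok)
		return 0;
	if (frames_in_fifo > 1 && txfemp)
		return frames_in_fifo;	/* FIFO drained: all frames done */
	return 1;			/* only the oldest frame is known done */
}

int main(void)
{
	printf("%u\n", frames_completed(2, 1, 1));	/* prints 2 */
	printf("%u\n", frames_completed(2, 1, 0));	/* prints 1 */
	printf("%u\n", frames_completed(1, 1, 0));	/* prints 1 */
	return 0;
}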
 
 /**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
        struct net_device *ndev = (struct net_device *)dev_id;
        struct xcan_priv *priv = netdev_priv(ndev);
        u32 isr, ier;
+       u32 isr_errors;
 
        /* Get the interrupt status from Xilinx CAN */
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
                xcan_tx_interrupt(ndev, isr);
 
 	/* Check for the type of error interrupt and process it */
-       if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-                       XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-               priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-                               XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-                               XCAN_IXR_ARBLST_MASK));
+       isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+                           XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+       if (isr_errors) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
                xcan_err_interrupt(ndev, isr);
        }
 
 	/* Check for the type of receive interrupt and process it */
-       if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+       if (isr & XCAN_IXR_RXNEMP_MASK) {
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+               ier &= ~XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
                napi_schedule(&priv->napi);
        }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 static void xcan_chip_stop(struct net_device *ndev)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
-       u32 ier;
 
        /* Disable interrupts and leave the can in configuration mode */
-       ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-       ier &= ~XCAN_INTR_ALL;
-       priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+       set_reset_mode(ndev);
        priv->can.state = CAN_STATE_STOPPED;
 }
 
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_suspend(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
 
-       return 0;
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+               xcan_chip_stop(ndev);
+       }
+
+       return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_resume(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
+       int ret;
 
-       return 0;
+       ret = pm_runtime_force_resume(dev);
+       if (ret) {
+               dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+               return ret;
+       }
+
+       if (netif_running(ndev)) {
+               ret = xcan_chip_start(ndev);
+               if (ret) {
+                       dev_err(dev, "xcan_chip_start failed on resume\n");
+                       return ret;
+               }
+
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
 
+       return 0;
 }
 
 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev)) {
-               netif_stop_queue(ndev);
-               netif_device_detach(ndev);
-       }
-
-       priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-       priv->can.state = CAN_STATE_SLEEPING;
-
        clk_disable_unprepare(priv->bus_clk);
        clk_disable_unprepare(priv->can_clk);
 
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
        int ret;
-       u32 isr, status;
 
        ret = clk_prepare_enable(priv->bus_clk);
        if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
                return ret;
        }
 
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-       if (netif_running(ndev)) {
-               if (isr & XCAN_IXR_BSOFF_MASK) {
-                       priv->can.state = CAN_STATE_BUS_OFF;
-                       priv->write_reg(priv, XCAN_SRR_OFFSET,
-                                       XCAN_SRR_RESET_MASK);
-               } else if ((status & XCAN_SR_ESTAT_MASK) ==
-                                       XCAN_SR_ESTAT_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               } else if (status & XCAN_SR_ERRWRN_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_WARNING;
-               } else {
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
-               netif_device_attach(ndev);
-               netif_start_queue(ndev);
-       }
-
        return 0;
 }
 
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+       .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+       { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+       { .compatible = "xlnx,axi-can-1.00.a", },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:      Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
        struct resource *res; /* IO mem resources */
        struct net_device *ndev;
        struct xcan_priv *priv;
+       const struct of_device_id *of_id;
+       int caps = 0;
        void __iomem *addr;
-       int ret, rx_max, tx_max;
+       int ret, rx_max, tx_max, tx_fifo_depth;
 
        /* Get the virtual base address for the device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                                  &tx_fifo_depth);
        if (ret < 0)
                goto err;
 
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err;
 
+       of_id = of_match_device(xcan_of_match, &pdev->dev);
+       if (of_id) {
+               const struct xcan_devtype_data *devtype_data = of_id->data;
+
+               if (devtype_data)
+                       caps = devtype_data->caps;
+       }
+
+       /* There is no way to directly figure out how many frames have been
+        * sent when the TXOK interrupt is processed. If watermark programming
+        * is supported, we can have 2 frames in the FIFO and use TXFEMP
+        * to determine if 1 or 2 frames have been sent.
+        * Theoretically we should be able to use TXFWMEMP to determine up
+        * to 3 frames, but it seems that after putting a second frame in the
+        * FIFO, with watermark at 2 frames, TXFWMEMP (fewer than 2 frames
+        * in FIFO) can be set even though there is no TXOK indicating that
+        * a frame was sent, which is not a sensible state; possibly TXFWMEMP
+        * is not completely synchronized with the rest of the bits.
+        */
+       if (caps & XCAN_CAP_WATERMARK)
+               tx_max = min(tx_fifo_depth, 2);
+       else
+               tx_max = 1;
+
        /* Create a CAN device instance */
        ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
        if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
                                        CAN_CTRLMODE_BERR_REPORTING;
        priv->reg_base = addr;
        priv->tx_max = tx_max;
+       spin_lock_init(&priv->tx_lock);
 
        /* Get IRQ for the device */
        ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
-       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
                        priv->reg_base, ndev->irq, priv->can.clock.freq,
-                       priv->tx_max);
+                       tx_fifo_depth, priv->tx_max);
 
        return 0;
 
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-       { .compatible = "xlnx,zynq-can-1.0", },
-       { .compatible = "xlnx,axi-can-1.00.a", },
-       { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
        .probe = xcan_probe,
        .remove = xcan_remove,
index 437cd6eb4faa39338108b73b43abaa89a559607c..bb28c701381a6117d81837c0bc322f1bde31f4ee 100644 (file)
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
+/* To be called with reg_lock held */
 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 {
        int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
+       /*
+        * free_irq must be called without reg_lock taken because the irq
+        * handler takes this lock, too.
+        */
        free_irq(chip->irq, chip);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
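
The reordering above exists because free_irq() waits synchronously for any running handler to finish, and the handler itself takes reg_lock; holding reg_lock across that wait is a classic lock-versus-wait deadlock. A user-space analogue, with pthread_join() standing in for free_irq()'s synchronous wait (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void *irq_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&reg_lock);	/* the handler needs reg_lock */
	/* ... handle the interrupt ... */
	pthread_mutex_unlock(&reg_lock);
	return NULL;
}

int main(void)
{
	pthread_t handler;

	pthread_create(&handler, NULL, irq_handler, NULL);

	/* Wrong order, as before the patch: taking reg_lock first and
	 * then waiting for the handler can deadlock if the handler is
	 * blocked on reg_lock. Right order, as in the patch: wait for
	 * the handler first, then take the lock for common teardown.
	 */
	pthread_join(handler, NULL);
	pthread_mutex_lock(&reg_lock);
	/* ... common teardown under the lock ... */
	pthread_mutex_unlock(&reg_lock);
	printf("teardown done\n");
	return 0;
}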
 
 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
        kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
        kthread_destroy_worker(chip->kworker);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
 
 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -2608,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .rmu_disable = mv88e6085_g1_rmu_disable,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
-       .serdes_power = mv88e6341_serdes_power,
 };
 
 static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2774,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .serdes_power = mv88e6341_serdes_power,
        .gpio_ops = &mv88e6352_gpio_ops,
 };
 
@@ -2951,7 +2960,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
-       .serdes_power = mv88e6341_serdes_power,
 };
 
 static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3327,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .serdes_power = mv88e6341_serdes_power,
        .gpio_ops = &mv88e6352_gpio_ops,
        .avb_ops = &mv88e6390_avb_ops,
 };
@@ -4506,12 +4515,10 @@ out_g2_irq:
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 out_g1_irq:
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 out:
        if (pdata)
                dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 }
 
 static const struct of_device_id mv88e6xxx_of_match[] = {
index 5b7658bcf0209546a4577d708123231c55f97960..5c3ef9fc8207e3de01b86e4c538633ecc8a9f273 100644 (file)
@@ -32,7 +32,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !PPC32
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y here.
index b6d735bf80117e27496e54d29f23ad32c215c2ac..342ae08ec3c29832ae5be0da8d93e59d6441cab1 100644 (file)
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)   memcpy((void *)(a), (b), (c))
-
 #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
 
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
        unsigned long outdata = 0xA5A0B5B0;
        unsigned long indata =  0x00000000;
        /* Try writing 32 bits */
-       memcpy_toio(membase, &outdata, 4);
+       memcpy_toio((void __iomem *)membase, &outdata, 4);
        /* Now compare them */
        if (memcmp_withio(&outdata, membase, 4) == 0)
                return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
                              struct e8390_pkt_hdr *hdr, int ring_page)
 {
        unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-       memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+       memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
        /* Fix endianness */
        hdr->count = swab16(hdr->count);
 }
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
        if (xfer_start + count > ei_status.rmem_end) {
                /* We must wrap the input move. */
                int semi_count = ei_status.rmem_end - xfer_start;
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
                              semi_count);
                count -= semi_count;
-               memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
-                             count);
+               memcpy_fromio(skb->data + semi_count,
+                             (void __iomem *)ei_status.rmem_start, count);
        } else {
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
+                             count);
        }
 }
 
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
 {
        long shmem = (start_page - WD_START_PG)<<8;
 
-       memcpy_toio(dev->mem_start + shmem, buf, count);
+       memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
 }
 
 /* dayna block input/output */
index 1b9d3130af4d64f844498d19b93adf8da0f2ebc6..17f12c18d225a50a21bba97eabf0670ac9f135e5 100644 (file)
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 
        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
+       io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
index d5c15e8bb3de706b12d343ee1a50477b23ab3d3f..9e5cf5583c87cc137a2a1fb2ccd13aa26074cf34 100644 (file)
@@ -44,7 +44,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
          Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
 
@@ -173,7 +173,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
        depends on X86 || ARM64 || COMPILE_TEST
        select BITREVERSE
        select CRC32
index 4b5d625de8f0be4edf28c72c47718ca107d3433d..8a3a60bb26888f0659c72a54d26afbfd9029843f 100644 (file)
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
 
                if (pdata->tx_pause != pdata->phy.tx_pause) {
                        new_state = 1;
-                       pdata->hw_if.config_tx_flow_control(pdata);
                        pdata->tx_pause = pdata->phy.tx_pause;
+                       pdata->hw_if.config_tx_flow_control(pdata);
                }
 
                if (pdata->rx_pause != pdata->phy.rx_pause) {
                        new_state = 1;
-                       pdata->hw_if.config_rx_flow_control(pdata);
                        pdata->rx_pause = pdata->phy.rx_pause;
+                       pdata->hw_if.config_rx_flow_control(pdata);
                }
 
                /* Speed support */
index 1205861b631896a0fc6b19ac608483d8e4b27d4e..eedd3f3dd22e220186578235c9f5f0b0072e80f6 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE_V2
        tristate "APM X-Gene SoC Ethernet-v2 Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        help
          This is the Ethernet driver for the on-chip ethernet interface
index afccb033177b39233a333994835713d577339c2f..e4e33c900b577161e77974bd62c45030cb2762e8 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        select PHYLIB
        select MDIO_XGENE
index fc7383106946ca6461f62ea305be0f03bb59c227..91eb8910b1c992b1b7876f05a26753a5cf79c100 100644 (file)
@@ -63,8 +63,6 @@
 
 #define AQ_CFG_NAPI_WEIGHT     64U
 
-#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
-
 /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
 
 #define AQ_NIC_FC_OFF    0U
index a2d416b24ffc251c71d002a9befe825d5c585fbc..2c6ebd91a9f2782e87472e497447b60974a7a571 100644 (file)
@@ -98,6 +98,8 @@ struct aq_stats_s {
 #define AQ_HW_MEDIA_TYPE_TP    1U
 #define AQ_HW_MEDIA_TYPE_FIBRE 2U
 
+#define AQ_HW_MULTICAST_ADDRESS_MAX     32U
+
 struct aq_hw_s {
        atomic_t flags;
        u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
                                    unsigned int packet_filter);
 
        int (*hw_multicast_list_set)(struct aq_hw_s *self,
-                                    u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                    u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
                                     [ETH_ALEN],
                                     u32 count);
 
index ba5fe8c4125d85d0050c2c5b269cba2c320d3127..e3ae29e523f0e26738b0ab80a2f3ac431083d13b 100644 (file)
@@ -135,17 +135,10 @@ err_exit:
 static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
-       int err = 0;
 
-       err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
-       if (err < 0)
-               return;
+       aq_nic_set_packet_filter(aq_nic, ndev->flags);
 
-       if (netdev_mc_count(ndev)) {
-               err = aq_nic_set_multicast_list(aq_nic, ndev);
-               if (err < 0)
-                       return;
-       }
+       aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
 static const struct net_device_ops aq_ndev_ops = {
index 1a1a6380c128c4522b330907cc16258f0e012189..7a22d0257e04ccf07ef87cae18d5d4f87630660a 100644 (file)
@@ -563,34 +563,41 @@ err_exit:
 
 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
+       unsigned int packet_filter = self->packet_filter;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
 
-       self->mc_list.count = 0U;
-
-       netdev_for_each_mc_addr(ha, ndev) {
-               ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-               ++self->mc_list.count;
+       self->mc_list.count = 0;
+       if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_PROMISC;
+       } else {
+               netdev_for_each_uc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
 
-               if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
-                       break;
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
        }
 
-       if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
-               /* Number of filters is too big: atlantic does not support this.
-                * Force all multi filter to support this.
-                * With this we disable all UC filters and setup "all pass"
-                * multicast mask
-                */
-               self->packet_filter |= IFF_ALLMULTI;
-               self->aq_nic_cfg.mc_list_count = 0;
-               return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
-                                                            self->packet_filter);
+       if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_ALLMULTI;
        } else {
-               return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
-                                                   self->mc_list.ar,
-                                                   self->mc_list.count);
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
+       }
+
+       if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_MULTICAST;
+               self->mc_list.count = i;
+               self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+                                                      self->mc_list.ar,
+                                                      self->mc_list.count);
        }
+       return aq_nic_set_packet_filter(self, packet_filter);
 }
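
The rewritten aq_nic_set_multicast_list() implements a capacity-driven fallback: more unicast entries than the 32-slot filter table forces promiscuous mode, unicast plus multicast overflowing the table forces accept-all-multicast, and anything that fits is programmed as an exact-match list. A compact restatement of that policy (the flag values are illustrative, not the real IFF_* constants):

#include <stdio.h>

#define FILTER_MAX	32U
#define F_PROMISC	0x1u
#define F_ALLMULTI	0x2u
#define F_MULTICAST	0x4u

static unsigned int pick_filter(unsigned int uc, unsigned int mc)
{
	unsigned int filter = 0, used = 0;

	if (uc > FILTER_MAX)
		filter |= F_PROMISC;	/* cannot program all UC entries */
	else
		used = uc;

	if (used + mc > FILTER_MAX)
		filter |= F_ALLMULTI;	/* UC + MC overflow the table */
	else
		used += mc;

	if (used > 0 && used < FILTER_MAX)
		filter |= F_MULTICAST;	/* exact-match list is in use */

	return filter;
}

int main(void)
{
	printf("uc=40 mc=0  -> 0x%x\n", pick_filter(40, 0));	/* 0x1 */
	printf("uc=4  mc=40 -> 0x%x\n", pick_filter(4, 40));	/* 0x6 */
	printf("uc=4  mc=4  -> 0x%x\n", pick_filter(4, 4));	/* 0x4 */
	return 0;
}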
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
index faa533a0ec474116b7d84369c947a7f0f1bfa853..fecfc401f95df041f56f348c1082147377464059 100644 (file)
@@ -75,7 +75,7 @@ struct aq_nic_s {
        struct aq_hw_link_status_s link_status;
        struct {
                u32 count;
-               u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+               u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
        } mc_list;
 
        struct pci_dev *pdev;
index 67e2f9fb9402f3ed419ee46c47a7f6bd4d8e1ffc..8cc6abadc03b90e88fb58b09a53e7da3702710e5 100644 (file)
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
index 819f6bcf9b4ee76e620691ae3861a1fad213eca9..3bdab972420bce0e846b707136268fe155dc3290 100644 (file)
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
        hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
-                                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+                                        IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 
        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI));
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
-                                   HW_ATL_B0_MAC_MIN + i);
+                                          HW_ATL_B0_MAC_MIN + i);
        }
 
        err = aq_hw_err_from_flags(self);
index e743ddf46343302fe69c4c562c7cba239fe06dd9..5d0ab8e74b680cc6e75de6e91b79115b4637daa7 100644 (file)
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
 config ARC_EMAC
        tristate "ARC EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET
+       depends on ARC || COMPILE_TEST
        ---help---
          On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
          non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
        tristate "Rockchip EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET && REGULATOR
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
        ---help---
          Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
          This selects Rockchip SoC glue layer support for the
index 567ee54504bcd6eba897009259f691b74b77609e..5e5022fa1d047be078be911bc4f6cd0631f04de7 100644 (file)
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
+       int err;
 
        alx_reset_phy(hw);
 
        if (!netif_running(alx->dev))
                return 0;
        netif_device_attach(alx->dev);
-       return __alx_open(alx, true);
+
+       rtnl_lock();
+       err = __alx_open(alx, true);
+       rtnl_unlock();
+
+       return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
index 94270f654b3b534b88ed3296f7556de0186de123..7087b88550db5fbbbfb909aee8c5a999db5bad57 100644 (file)
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
        skb = build_skb(page_address(page) + adapter->rx_page_offset,
                        adapter->rx_frag_size);
        if (likely(skb)) {
+               skb_reserve(skb, NET_SKB_PAD);
                adapter->rx_page_offset += adapter->rx_frag_size;
                if (adapter->rx_page_offset >= PAGE_SIZE)
                        adapter->rx_page = NULL;
index af75156919edfead9bbe1e223b92d45d4fdd444e..4c3bfde6e8de00f2010b1329e05c8b36a16e158f 100644 (file)
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
        tristate "Broadcom iProc GBit BCMA support"
        depends on BCMA && BCMA_HOST_SOC
-       depends on HAS_DMA
        depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select BGMAC
        select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
        tristate "Broadcom iProc GBit platform support"
-       depends on HAS_DMA
        depends on ARCH_BCM_IPROC || COMPILE_TEST
        depends on OF
        select BGMAC
index d5fca2e5a9bc34ad6edfa295e378dfe12078c0e5..a1f60f89e05944458e98e7faa2292960368c5ef8 100644 (file)
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
-               priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
-                                  GIB_FCS_STRIP);
+               priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+                                 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
index d6e5d0cbf3a3b3c526d347add087c9cef428776a..cf440b91fd04a331a7dce529d740a22686b96dfd 100644 (file)
@@ -278,7 +278,8 @@ struct bcm_rsb {
 #define  GIB_GTX_CLK_EXT_CLK           (0 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_125MHZ            (1 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_250MHZ            (2 << GIB_GTX_CLK_SEL_SHIFT)
-#define  GIB_FCS_STRIP                 (1 << 6)
+#define  GIB_FCS_STRIP_SHIFT           6
+#define  GIB_FCS_STRIP                 (1 << GIB_FCS_STRIP_SHIFT)
 #define  GIB_LCL_LOOP_EN               (1 << 7)
 #define  GIB_LCL_LOOP_TXEN             (1 << 8)
 #define  GIB_RMT_LOOP_EN               (1 << 9)
index d847e1b9c37b5afff33e799e919e3ff39b5cd1e8..be1506169076f0a89f6a621d01dce81afe720ba7 100644 (file)
@@ -1533,6 +1533,7 @@ struct bnx2x {
        struct link_vars        link_vars;
        u32                     link_cnt;
        struct bnx2x_link_report_data last_reported_link;
+       bool                    force_link_down;
 
        struct mdio_if_info     mdio;
 
index 8cd73ff5debc276aec53d1f056fe3040875b2c0a..af7b5a4d8ba044800b0eb229d8c989c564515e94 100644 (file)
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
        struct bnx2x_link_report_data cur_data;
 
+       if (bp->force_link_down) {
+               bp->link_vars.link_up = 0;
+               return;
+       }
+
        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bp->pending_max = 0;
        }
 
+       bp->force_link_down = false;
        if (bp->port.pmf) {
                rc = bnx2x_initial_phy_init(bp, load_mode);
                if (rc)
index da18aa239acb19ab87c107063ef240ac4a3b9261..a4a90b6cdb467038457fca98e8ab9f25dd72cde8 100644 (file)
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                } else if ((info->flow_type == UDP_V6_FLOW) &&
                           (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                }
                return 0;
 
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
                bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
-       return bnx2x_config_rss_eth(bp, false);
+       if (bp->state == BNX2X_STATE_OPEN)
+               return bnx2x_config_rss_eth(bp, false);
+
+       return 0;
 }
 
 /**
index 5b1ed240bf18be0963cc580ab4256b6adc924046..57348f2b49a31fd5b1ef5a67d2ba1e7945768ab0 100644 (file)
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
+               /* Immediately indicate link as down */
+               bp->link_vars.link_up = 0;
+               bp->force_link_down = true;
+               netif_carrier_off(bp->dev);
+               BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                /* When ret value shows failure of allocation failure,
                 * the nic is rebooted again. If open still fails, a error
index 176fc9f4d7defe6a9d5b513902c97f56d732b323..4394c1162be4fde931aa822d69a008b89f54efb8 100644 (file)
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        }
        vnic->uc_filter_count = 1;
 
-       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+       vnic->rx_mask = 0;
+       if (bp->dev->flags & IFF_BROADCAST)
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
        if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
        return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
        bp->hw_resc.max_irqs = max_irqs;
 }
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                rc = bnxt_request_irq(bp);
                if (rc) {
                        netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
-                       goto open_err;
+                       goto open_err_irq;
                }
        }
 
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 open_err:
        bnxt_debug_dev_exit(bp);
        bnxt_disable_napi(bp);
+
+open_err_irq:
        bnxt_del_napi(bp);
 
 open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 
        mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
                  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
-                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
 
        if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
        uc_update = bnxt_uc_list_updated(bp);
 
+       if (dev->flags & IFF_BROADCAST)
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
        if (dev->flags & IFF_ALLMULTI) {
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
        int rx, tx, cp;
 
        _bnxt_get_max_rings(bp, &rx, &tx, &cp);
+       *max_rx = rx;
+       *max_tx = tx;
        if (!rx || !tx || !cp)
                return -ENOMEM;
 
-       *max_rx = rx;
-       *max_tx = tx;
        return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                /* Not enough rings, try disabling agg rings. */
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
                rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-               if (rc)
+               if (rc) {
+                       /* set BNXT_FLAG_AGG_RINGS back for consistency */
+                       bp->flags |= BNXT_FLAG_AGG_RINGS;
                        return rc;
+               }
                bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
                bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
                bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
index 9b14eb610b9f653b61092d74b3ab9257a84383d9..91575ef97c8cb119d9407530f4b6f5472d72724c 100644 (file)
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 795f45024c209e65591a3e9fe60814315ebb3cb0..491bd40a254d8dad8810d983505b69efe2d011b1 100644 (file)
 #define BNXT_FID_INVALID                       0xffff
 #define VLAN_TCI(vid, prio)    ((vid) | ((prio) << VLAN_PRIO_SHIFT))
 
+#define is_vlan_pcp_wildcarded(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vlan_pcp_exactmatch(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
+#define is_vlan_pcp_zero(vlan_tci)     \
+       ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vid_exactmatch(vlan_tci_mask)       \
+       ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+
 /* Return the dst fid of the func for flow forwarding
  * For PFs: src_fid is the fid of the PF
  * For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
        return true;
 }
 
+static bool is_vlan_tci_allowed(__be16  vlan_tci_mask,
+                               __be16  vlan_tci)
+{
+       /* VLAN priority must be either exactly zero or fully wildcarded, and
+        * the VLAN ID must be an exact match.
+        */
+       if (is_vid_exactmatch(vlan_tci_mask) &&
+           ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
+             is_vlan_pcp_zero(vlan_tci)) ||
+            is_vlan_pcp_wildcarded(vlan_tci_mask)))
+               return true;
+
+       return false;
+}
+
 static bool bits_set(void *key, int len)
 {
        const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
-           !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
-                          sizeof(flow->l2_mask.inner_vlan_tci))) {
-               netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+           !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
+                                flow->l2_key.inner_vlan_tci)) {
+               netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
index 347e4f946eb222ce5c8e1e14777c9d6555eb48dc..840f6e505f733208955bedee497ecf51397d487d 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
        bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
        bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
                bnxt_close_nic(bp, true, false);
index 30273a7717e2df797890da57e229ce31e9d957e2..4fd829b5e65d14b56337e63fc480dd72c8420eeb 100644 (file)
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
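
The kcalloc() sizing change above matters on 64-bit kernels: bitmap helpers such as find_first_zero_bit() walk the table in unsigned long words, so a buffer of DIV_ROUND_UP(size, 32) * 4 bytes can be shorter than the BITS_TO_LONGS(size) longs those helpers touch. A user-space restatement of the two sizings (macros mirrored from the kernel definitions):

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)

int main(void)
{
	unsigned int size = 96;	/* number of ids in the table */

	/* old sizing: whole 32-bit words, regardless of word size */
	printf("old: %zu bytes\n", (size_t)DIV_ROUND_UP(size, 32) * 4);
	/* new sizing: whole unsigned longs, matching the accessors */
	printf("new: %zu bytes\n",
	       BITS_TO_LONGS(size) * sizeof(unsigned long));
	return 0;
}

On a 64-bit build this prints 12 versus 16 bytes: reading the second long of the bitmap would run 4 bytes past the old allocation.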
 
index 3be87efdc93d6347da8417ddcd101ed90cc12d8c..aa1374d0af9313dfdbf6a7f8dfeea92e2fee7013 100644 (file)
@@ -6,11 +6,15 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2005-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
  *     Copyright (C) 2000-2016 Broadcom Corporation.
  *     Copyright (C) 2016-2017 Broadcom Ltd.
+ *     Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ *     refers to Broadcom Inc. and/or its subsidiaries.
  *
  *     Permission is hereby granted for the distribution of this firmware
  *     data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_restore_clk(tp);
 
+       /* Increase the core clock speed to fix tx timeout issue for 5762
+        * with 100Mbps link speed.
+        */
+       if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+               val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+               tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+                    TG3_CPMU_MAC_ORIDE_ENABLE);
+       }
+
        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
index 1d61aa3efda177c64c69465f0b72c0df5221ba37..a772a33b685c5eb8c28137107eb33cb4b6ffeb1d 100644 (file)
@@ -7,6 +7,8 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2007-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  */
 
 #ifndef _T3_H
index 86659823b2592e20d16e1fbc0640d45c99508f47..3d45f4c92cf6e5d3f091ae654e5312165956d19f 100644 (file)
 #define GEM_DCFG6              0x0294 /* Design Config 6 */
 #define GEM_DCFG7              0x0298 /* Design Config 7 */
 #define GEM_DCFG8              0x029C /* Design Config 8 */
+#define GEM_DCFG10             0x02A4 /* Design Config 10 */
 
 #define GEM_TXBDCTRL   0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL   0x04d0 /* RX Buffer Descriptor control register */
 #define GEM_SCR2CMP_OFFSET                     0
 #define GEM_SCR2CMP_SIZE                       8
 
+/* Bitfields in DCFG10 */
+#define GEM_TXBD_RDBUFF_OFFSET                 12
+#define GEM_TXBD_RDBUFF_SIZE                   4
+#define GEM_RXBD_RDBUFF_OFFSET                 8
+#define GEM_RXBD_RDBUFF_SIZE                   4
+
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET                   0
 #define GEM_SUBNSINCR_SIZE                     16
 #define MACB_CAPS_USRIO_DISABLED               0x00000010
 #define MACB_CAPS_JUMBO                                0x00000020
 #define MACB_CAPS_GEM_HAS_PTP                  0x00000040
+#define MACB_CAPS_BD_RD_PREFETCH               0x00000080
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
        unsigned int max_tuples;
 
        struct tasklet_struct   hresp_err_tasklet;
+
+       int     rx_bd_rd_prefetch;
+       int     tx_bd_rd_prefetch;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
index 3e93df5d4e3b2573f88cc427e7eefc6d1930e3ff..a6c911bb5ce22588276a9f92561947ff5bff2726 100644 (file)
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       int size;
 
-       queue = &bp->queues[0];
        bp->macbgem_ops.mog_free_rx_buffers(bp);
-       if (queue->rx_ring) {
-               dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-                               queue->rx_ring, queue->rx_ring_dma);
-               queue->rx_ring = NULL;
-       }
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                kfree(queue->tx_skb);
                queue->tx_skb = NULL;
                if (queue->tx_ring) {
-                       dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+                       size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
                                          queue->tx_ring, queue->tx_ring_dma);
                        queue->tx_ring = NULL;
                }
+               if (queue->rx_ring) {
+                       size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
+                                         queue->rx_ring, queue->rx_ring_dma);
+                       queue->rx_ring = NULL;
+               }
        }
 }
 
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
        int size;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               size = TX_RING_BYTES(bp);
+               size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
                queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                    &queue->tx_ring_dma,
                                                    GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
                if (!queue->tx_skb)
                        goto out_err;
 
-               size = RX_RING_BYTES(bp);
+               size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
                queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                 &queue->rx_ring_dma, GFP_KERNEL);
                if (!queue->rx_ring)
@@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev)
        int err;
        u32 reg;
 
+       bp->queues[0].bp = bp;
+
        dev->netdev_ops = &at91ether_netdev_ops;
        dev->ethtool_ops = &macb_ethtool_ops;
 
@@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = {
 static const struct macb_config zynqmp_config = {
        .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
                        MACB_CAPS_JUMBO |
-                       MACB_CAPS_GEM_HAS_PTP,
+                       MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
        void __iomem *mem;
        const char *mac;
        struct macb *bp;
-       int err;
+       int err, val;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
        else
                dev->max_mtu = ETH_DATA_LEN;
 
+       if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+               val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+
+               val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+       }
+
        mac = of_get_mac_address(np);
        if (mac) {
                ether_addr_copy(bp->dev->dev_addr, mac);
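
The probe-time sizing above turns a DCFG10 field value into extra ring bytes: a value of n is read as a 2^n-descriptor read-ahead, i.e. (2 << (n - 1)) descriptors, each macb_dma_desc_get_size(bp) bytes. A sketch of the arithmetic, assuming a fixed 16-byte descriptor purely for illustration:

#include <stdio.h>

#define DESC_SIZE 16	/* assumed DMA descriptor size in bytes */

/* Extra bytes per ring so a 2^val descriptor read-ahead never
 * runs past the end of the allocation. */
static int bd_prefetch_bytes(int val)
{
	if (!val)
		return 0;
	return (2 << (val - 1)) * DESC_SIZE;
}

int main(void)
{
	int val;

	for (val = 0; val <= 4; val++)
		printf("val=%d -> %d bytes\n", val, bd_prefetch_bytes(val));
	return 0;
}
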
index 2220c771092b46e8fb583d46ea99d5829e1793d0..678835136bf8069326067feaa46f8465db4e38d4 100644 (file)
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
        if (delta > TSU_NSEC_MAX_VAL) {
                gem_tsu_get_time(&bp->ptp_clock_info, &now);
-               if (sign)
-                       now = timespec64_sub(now, then);
-               else
-                       now = timespec64_add(now, then);
+               now = timespec64_add(now, then);
 
                gem_tsu_set_time(&bp->ptp_clock_info,
                                 (const struct timespec64 *)&now);
index 07d2201530d26c85e26cf0987553451acad936a6..9fdd496b90ff47cb0f1147777ae7b9ca0071076d 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
index 043e3c11c42bd407d47561bec2a2e0acd525f12b..92d88c5f76fb8b68e9f8b35ada37d4a77d68f739 100644 (file)
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
 
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        imply CAVIUM_PTP
-       depends on 64BIT
+       depends on 64BIT && PCI
        ---help---
          This driver supports Thunder's NIC virtual function
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config        THUNDER_NIC_BGX
 
 config THUNDER_NIC_RGX
        tristate "Thunder MAC interface driver (RGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        ---help---
@@ -53,7 +53,7 @@ config        THUNDER_NIC_RGX
 
 config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
-       depends on 64BIT
+       depends on 64BIT && PCI
        imply PTP_1588_CLOCK
        default y
        ---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
 
 config LIQUIDIO
        tristate "Cavium LiquidIO support"
-       depends on 64BIT
+       depends on 64BIT && PCI
        depends on MAY_USE_DEVLINK
        imply PTP_1588_CLOCK
        select FW_LOADER
index 8a815bb5717732331293e9fba5b00d3ca23aaf88..7e8454d3b1ad3f382f778c27058695c34b9f13cb 100644 (file)
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
  */
 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
 
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST  msecs_to_jiffies(1000)
+
 struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
        force_io_queues_off(oct);
 
        /* To allow for in-flight requests */
-       schedule_timeout_uninterruptible(100);
+       schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
 
        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");
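
The new constant matters because schedule_timeout_uninterruptible() counts in jiffies, not milliseconds: the old literal 100 meant 100 ticks, anywhere from 100 ms at HZ=1000 to a full second at HZ=100, whereas msecs_to_jiffies(1000) always means one second. A userspace sketch of the conversion, assuming HZ=250 for illustration:

#include <stdio.h>

#define HZ 250	/* assumed kernel tick rate for this example */

/* Simplified milliseconds-to-ticks conversion (rounding up),
 * mirroring what msecs_to_jiffies() is meant to guarantee. */
static unsigned long ms_to_ticks(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	printf("raw 100 ticks = %d ms\n", 100 * 1000 / HZ);
	printf("1000 ms       = %lu ticks\n", ms_to_ticks(1000));
	return 0;
}
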
index 3f6afb54a5eb188061dcad1ce4679465d408db86..bb43ddb7539e719d0cbff780e5ddf17c756dbe05 100644 (file)
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct octeon_mgmt *p = netdev_priv(netdev);
-       int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+       int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        netdev->mtu = new_mtu;
 
-       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+       /* HW lifts the limit if the frame is VLAN tagged
+        * (+4 bytes for each tag, up to two tags)
+        */
+       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+       /* Set the hardware to truncate packets larger than the MTU. The jabber
+        * register must be set to a multiple of 8 bytes, so round up. JABBER is
+        * an unconditional limit, so we need to account for two possible VLAN
+        * tags.
+        */
        cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
-                      (size_without_fcs + 7) & 0xfff8);
+                      (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
 
        return 0;
 }
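
The JABBER register takes a multiple of 8 bytes, so the driver rounds up with (x + 7) & 0xfff8 after reserving room for two possible VLAN tags. A standalone sketch of that computation:

#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN 4
#define VLAN_HLEN   4

/* Round up to a multiple of 8, as the JABBER register requires;
 * 0xfff8 also keeps the result within the register's 16-bit field. */
static unsigned int jabber_limit(unsigned int mtu)
{
	unsigned int max_packet = mtu + ETH_HLEN + ETH_FCS_LEN;

	return (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8;
}

int main(void)
{
	printf("mtu 1500 -> jabber %u\n", jabber_limit(1500));	/* 1528 */
	return 0;
}
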
index 5d08d2aeb1722cf16976d115634ed3dd283a5b8b..e337da6ba2a4c16973f4728c9fd9564da162942c 100644 (file)
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
        lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
        lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
                              GFP_KERNEL);
+       if (!lmac->dmacs)
+               return -ENOMEM;
 
        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
index 7b795edd9d3a9543271d29acf0cc35d760a6b065..a19172dbe6be272d9a168302bab18f551a687a17 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "common.h"
 #include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (t.qset_idx >= nqsets)
                        return -EINVAL;
+               t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
 
                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
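
array_index_nospec() keeps the bounds check above from being bypassed speculatively by clamping the index with a branchless mask. One way to build such a mask in plain C — an illustration of the idea, not the kernel's implementation, and it relies on arithmetic right shift of negative values, which mainstream compilers provide:

#include <stddef.h>
#include <stdio.h>

/* Branchless clamp: returns idx when idx < size, 0 otherwise.
 * Assumes idx and size both fit in a signed long. */
static size_t index_nospec(size_t idx, size_t size)
{
	long diff = (long)(idx - size);	/* negative iff idx < size */
	size_t mask = (size_t)(diff >> (sizeof(long) * 8 - 1));

	return idx & mask;		/* mask is all-ones or zero */
}

int main(void)
{
	printf("%zu %zu\n", index_nospec(3, 8), index_nospec(9, 8)); /* 3 0 */
	return 0;
}
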
index 00fc5f1afb1d0024b6477d6c63408afcd9858ad9..7dddb9e748b8146fbcbb441153ab16669305599e 100644 (file)
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
-       req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
-               f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
-       req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
-               f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+       memcpy(&req->local_ip, f->fs.val.lip, 4);
+       memcpy(&req->peer_ip, f->fs.val.fip, 4);
        req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                DELACK_V(f->fs.hitcnts) |
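
The replaced shift-and-OR code assembled the four address bytes into a host-order integer, whose in-memory layout flips on big-endian machines; memcpy() into the big-endian field preserves the wire-order bytes everywhere. A small sketch of the difference, with made-up variable names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void dump(const char *tag, uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	printf("%s: %02x %02x %02x %02x\n", tag, p[0], p[1], p[2], p[3]);
}

int main(void)
{
	uint8_t ip[4] = { 10, 0, 0, 1 };	/* network order: 10.0.0.1 */
	uint32_t shifted, copied;

	/* Shift-and-OR builds a host-order number, so its in-memory
	 * byte layout flips on big-endian machines. */
	shifted = ip[0] | ip[1] << 8 | ip[2] << 16 | (uint32_t)ip[3] << 24;

	/* memcpy() preserves the wire-order bytes on any machine. */
	memcpy(&copied, ip, 4);

	dump("shifted", shifted);
	dump("copied ", copied);
	return 0;
}
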
index dd04a2f89ce62db6ea9bca433023d9aac4b10e23..a8926e97935eb01ecdea10cfe760b13d3409634c 100644 (file)
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
-                       txq->dcb_prio = value;
+                       txq->dcb_prio = enable ? value : 0;
        }
 }
 
@@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
 
                adapter->geneve_port = 0;
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+               break;
        default:
                return;
        }
@@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
 
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+               break;
        default:
                return;
        }
index 974a868a4824b78dc8cb7225f37b5d2cf8b24b32..3720c3e11ebb883466d04b4a2169878f0b135399 100644 (file)
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
        };
 
        unsigned int part, manufacturer;
-       unsigned int density, size;
+       unsigned int density, size = 0;
        u32 flashid = 0;
        int ret;
 
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x22: /* 256MB */
                        size = 1 << 28;
                        break;
-
-               default:
-                       dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x17: /* 64MB */
                        size = 1 << 26;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
-       default:
-               dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
-                       flashid);
-               return -EINVAL;
+       }
+
+       /* If we didn't recognize the FLASH part, that's no real issue: the
+        * Hardware/Software contract says that Hardware will _*ALWAYS*_
+        * use a FLASH part which is at least 4MB in size and has 64KB
+        * sectors.  The unrecognized FLASH part is likely to be much larger
+        * than 4MB, but that's all we really need.
+        */
+       if (size == 0) {
+               dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+                        flashid);
+               size = 1 << 22;
        }
 
        /* Store decoded Flash size and fall through into vetting code. */
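
With the per-vendor default: cases removed, an unrecognized density code simply leaves size at its initializer of 0, and a single warning path applies the 4MB floor that the hardware/software contract guarantees. A sketch of that decode-with-fallback shape, using an invented density table:

#include <stdio.h>

/* Decode a (hypothetical) density code; 0 means "unrecognized". */
static unsigned int flash_size(unsigned int density)
{
	switch (density) {
	case 0x17: return 1u << 26;	/* 64MB  */
	case 0x18: return 1u << 24;	/* 16MB  */
	case 0x22: return 1u << 28;	/* 256MB */
	}
	return 0;
}

int main(void)
{
	unsigned int size = flash_size(0x42);

	if (size == 0) {
		fprintf(stderr, "unknown density, assuming 4MB\n");
		size = 1u << 22;	/* contract-guaranteed minimum */
	}
	printf("flash size: %u bytes\n", size);
	return 0;
}
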
index 5ab912937aff2e8eb34887deb2aa36c8f45bebde..ec0b545197e2dfd7c0443917c0ec0f33861c77bd 100644 (file)
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
 config CS89x0
        tristate "CS89x0 support"
        depends on ISA || EISA || ARM
+       depends on !PPC32
        ---help---
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the file
index 973c1fb70d09929f92fc47db0e3d60e3146eaff0..99038dfc7fbe52bea5932691133e2bdeced48844 100644 (file)
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
        enic->rfs_h.max = enic->config.num_arfs;
        enic->rfs_h.free = enic->rfs_h.max;
        enic->rfs_h.toclean = 0;
-       enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 
        enic_rfs_timer_stop(enic);
        spin_lock_bh(&enic->rfs_h.lock);
-       enic->rfs_h.free = 0;
        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                        enic_delfltr(enic, n->fltr_id);
                        hlist_del(&n->node);
                        kfree(n);
+                       enic->rfs_h.free++;
                }
        }
        spin_unlock_bh(&enic->rfs_h.lock);
index 30d2eaa18c0479adcd75315db194d3785b8007bc..60641e202534109f3d1341607b6c4d413b75f65b 100644 (file)
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
 {
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
-       int err;
+       int err, ret;
 
        err = enic_request_intr(enic);
        if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
-       enic_rfs_flw_tbl_init(enic);
+       enic_rfs_timer_start(enic);
 
        return 0;
 
 err_out_free_rq:
        for (i = 0; i < enic->rq_count; i++) {
-               err = vnic_rq_disable(&enic->rq[i]);
-               if (err)
-                       return err;
-               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+               ret = vnic_rq_disable(&enic->rq[i]);
+               if (!ret)
+                       vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        }
        enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2048,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
        return 0;
 }
 
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       bool running = netif_running(netdev);
+       int err = 0;
+
+       ASSERT_RTNL();
+       if (running) {
+               err = enic_stop(netdev);
+               if (err)
+                       return err;
+       }
+
+       netdev->mtu = new_mtu;
+
+       if (running) {
+               err = enic_open(netdev);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct enic *enic = netdev_priv(netdev);
-       int running = netif_running(netdev);
 
        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
                return -EOPNOTSUPP;
 
-       if (running)
-               enic_stop(netdev);
-
-       netdev->mtu = new_mtu;
-
        if (netdev->mtu > enic->port_mtu)
                netdev_warn(netdev,
-                       "interface MTU (%d) set higher than port MTU (%d)\n",
-                       netdev->mtu, enic->port_mtu);
-
-       if (running)
-               enic_open(netdev);
+                           "interface MTU (%d) set higher than port MTU (%d)\n",
+                           netdev->mtu, enic->port_mtu);
 
-       return 0;
+       return _enic_change_mtu(netdev, new_mtu);
 }
 
 static void enic_change_mtu_work(struct work_struct *work)
@@ -2077,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
        struct enic *enic = container_of(work, struct enic, change_mtu_work);
        struct net_device *netdev = enic->netdev;
        int new_mtu = vnic_dev_mtu(enic->vdev);
-       int err;
-       unsigned int i;
-
-       new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
 
        rtnl_lock();
-
-       /* Stop RQ */
-       del_timer_sync(&enic->notify_timer);
-
-       for (i = 0; i < enic->rq_count; i++)
-               napi_disable(&enic->napi[i]);
-
-       vnic_intr_mask(&enic->intr[0]);
-       enic_synchronize_irqs(enic);
-       err = vnic_rq_disable(&enic->rq[0]);
-       if (err) {
-               rtnl_unlock();
-               netdev_err(netdev, "Unable to disable RQ.\n");
-               return;
-       }
-       vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
-       vnic_cq_clean(&enic->cq[0]);
-       vnic_intr_clean(&enic->intr[0]);
-
-       /* Fill RQ with new_mtu-sized buffers */
-       netdev->mtu = new_mtu;
-       vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-       /* Need at least one buffer on ring to get going */
-       if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
-               rtnl_unlock();
-               netdev_err(netdev, "Unable to alloc receive buffers.\n");
-               return;
-       }
-
-       /* Start RQ */
-       vnic_rq_enable(&enic->rq[0]);
-       napi_enable(&enic->napi[0]);
-       vnic_intr_unmask(&enic->intr[0]);
-       enic_notify_timer_start(enic);
-
+       (void)_enic_change_mtu(netdev, new_mtu);
        rtnl_unlock();
 
        netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2904,6 +2879,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
+       enic_rfs_flw_tbl_init(enic);
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
 
        enic->port_mtu = enic->config.mtu;
-       (void)enic_change_mtu(netdev, enic->port_mtu);
 
        err = enic_set_mac_addr(netdev, enic->mac_addr);
        if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* MTU range: 68 - 9000 */
        netdev->min_mtu = ENIC_MIN_MTU;
        netdev->max_mtu = ENIC_MAX_MTU;
+       netdev->mtu     = enic->port_mtu;
 
        err = register_netdev(netdev);
        if (err) {
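
_enic_change_mtu() collapses both MTU paths into one quiesce-if-running, apply, restart-if-it-was-running sequence under the rtnl lock. The same shape, abstracted away from the driver with a hypothetical device type:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool running;
	int mtu;
};

static int dev_stop(struct dev *d)  { d->running = false; return 0; }
static int dev_start(struct dev *d) { d->running = true;  return 0; }

/* Stop only if running, apply the change, resume only if it was running. */
static int change_mtu(struct dev *d, int new_mtu)
{
	bool was_running = d->running;
	int err;

	if (was_running) {
		err = dev_stop(d);
		if (err)
			return err;
	}

	d->mtu = new_mtu;

	if (was_running) {
		err = dev_start(d);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct dev d = { .running = true, .mtu = 1500 };

	change_mtu(&d, 9000);
	printf("mtu=%d running=%d\n", d.mtu, d.running);
	return 0;
}
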
index 78db8e62a83f17c05d615cb674703efa4e926bd0..ed6c76d20b45b2a38ccf87e63487e77a756812a3 100644 (file)
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
        if (unlikely(nd->state != ncsi_dev_state_functional))
                return;
 
-       netdev_info(nd->dev, "NCSI interface %s\n",
-                   nd->link_up ? "up" : "down");
+       netdev_dbg(nd->dev, "NCSI interface %s\n",
+                  nd->link_up ? "up" : "down");
 }
 
 static void ftgmac100_setup_clk(struct ftgmac100 *priv)
index 5f4e1ffa7b95fe4f8d2bb6447764951c51fffc67..ab02057ac7304f088242a2a07481820302d3556b 100644 (file)
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        void *sgt_buf;
 
        /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom +
-               sizeof(struct qm_sg_entry) * (1 + nr_frags));
+       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
        sgt_buf = netdev_alloc_frag(sz);
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        skbh = (struct sk_buff **)buffer_start;
        *skbh = skb;
 
-       addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                             sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                             dma_dir);
+       addr = dma_map_single(dev, buffer_start,
+                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(dev, addr))) {
                dev_err(dev, "DMA mapping failed");
                err = -EINVAL;
index ce6e24c74978a22a1d22383f0a5b4f38ffec7c00..ecbf6187e13a1fe3d6dba06015ff9cc49aed6224 100644 (file)
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
        struct {
                u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                iowrite32be(0xffffffff, &regs->pmda[i].lcv);
        }
 
+       /* Short packet padding removal from checksum calculation */
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
        start_port_hwp(port);
 }
 
index 8bcf470ff5f38a4e62842a5f31d5c0b45141ab85..fb1a7251f45d336978199d208af5e1a40eee1556 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on (OF || ACPI) && HAS_DMA
+       depends on OF || ACPI
        depends on ARM || ARM64 || COMPILE_TEST
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
index 5b122728dcb472c9f9849cb2a82a08e5b4917cca..09e9da10b786549b6232d8069c4e45857b95fd8c 100644 (file)
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
        hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
                                nic_dev, link_status_event_handler);
 
+       SET_NETDEV_DEV(netdev, &pdev->dev);
        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register netdev\n");
index e2e5cdc7119c3ed0e890f99c7b30996d72d280e9..4c0f7eda1166c5df202c3b9a71cc2e43516531fb 100644 (file)
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
+       irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index 9128858479c4a031baa4b6b93b47d7097f01a995..2353ec829c04407d88b365ba58e14ec960c4e978 100644 (file)
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
+               wqe_size = 0;
                goto flush_skbs;
        }
 
index d0e196bff0818ce214b4909cb97976aa3502bdc4..ffe7acbeaa22d372b7ce32f9950edb9a98cdf71a 100644 (file)
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
        return;
 
 failure:
-       dev_info(dev, "replenish pools failure\n");
+       if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+               dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;
 
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
-               dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+               if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+                       dev_err_ratelimited(dev, "tx: send failed\n");
                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;
 
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
                rc = ibmvnic_login(netdev);
                if (rc) {
-                       adapter->state = VNIC_PROBED;
-                       return 0;
+                       adapter->state = reset_state;
+                       return rc;
                }
 
                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
        return crq;
 }
 
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+       switch (rc) {
+       case H_PARAMETER:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       case H_CLOSED:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       default:
+               dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+               break;
+       }
+}
+
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq)
 {
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                                cpu_to_be64(u64_crq[2]),
                                cpu_to_be64(u64_crq[3]));
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
                                cpu_to_be64(remote_handle),
                                ioba, num_entries);
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
index 8ffb7454e67c2a0309708c1b47487c4d1c58b440..b151ae316546c2483aa91abfabc900b608e53e4a 100644 (file)
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(I40E_SKB_PAD +
-                                              (xdp->data_end -
-                                               xdp->data_hard_start));
+                               SKB_DATA_ALIGN(xdp->data_end -
+                                              xdp->data_hard_start);
 #endif
        struct sk_buff *skb;
 
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                return NULL;
 
        /* update pointers within the skb to store the data */
-       skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
        if (metasize)
                skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       bool failure = false, xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
+       bool failure = false;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & I40E_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & I40E_XDP_TX) {
                struct i40e_ring *xdp_ring =
                        rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                i40e_xdp_ring_update_tail(xdp_ring);
-               xdp_do_flush_map();
        }
 
        rx_ring->skb = skb;
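
Turning the XDP verdicts into bit flags lets a single accumulator remember every action taken over the whole NAPI budget, so the redirect flush and the TX tail bump each run at most once and only when their bit was actually set. A distilled version of the pattern, with stand-in flush functions:

#include <stdio.h>

#define XDP_CONSUMED (1u << 0)
#define XDP_TX       (1u << 1)
#define XDP_REDIR    (1u << 2)

/* Stand-ins for the per-batch completion work. */
static void ring_tail_bump(void) { puts("bump TX tail"); }
static void redirect_flush(void) { puts("flush redirect maps"); }

int main(void)
{
	unsigned int verdicts[] = { XDP_TX, XDP_CONSUMED, XDP_TX };
	unsigned int xdp_xmit = 0;
	size_t i;

	/* Per-packet loop: OR each verdict into the accumulator. */
	for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		xdp_xmit |= verdicts[i];

	/* End of batch: do each kind of flush at most once. */
	if (xdp_xmit & XDP_REDIR)
		redirect_flush();
	if (xdp_xmit & XDP_TX)
		ring_tail_bump();
	return 0;
}
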
index 3f5c350716bb0e595d79ec928188f5862461694c..0bd1294ba51737240d510f31bbd255faceffeb11 100644 (file)
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        if (enable_addr != 0)
                rar_high |= IXGBE_RAH_AV;
 
+       /* Record lower 32 bits of MAC address and then make
+        * sure that write is flushed to hardware before writing
+        * the upper 16 bits and setting the valid bit.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
 
-       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+       /* Clear the address valid bit and upper 16 bits of the address
+        * before clearing the lower bits. This way we aren't updating
+        * a live filter.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+       IXGBE_WRITE_FLUSH(hw);
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 
        /* clear VMDq pool/queue selection for this RAR */
        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
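
Both hunks enforce the same ordering rule: the RAR half that carries the valid bit may only change after the other half has reached the device, so a posted-write flush sits between the two MMIO writes. A sketch with volatile accessors standing in for the driver's register macros:

#include <stdint.h>

/* Minimal MMIO helpers; real drivers use writel()/readl(). */
static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

static inline uint32_t mmio_read32(volatile uint32_t *reg)
{
	return *reg;	/* the read forces posted writes to complete */
}

/* Set an address filter without a window where half-written
 * data is marked valid. */
static void set_filter(volatile uint32_t *lo, volatile uint32_t *hi,
		       uint32_t addr_lo, uint32_t addr_hi_valid)
{
	mmio_write32(lo, addr_lo);
	(void)mmio_read32(lo);		/* flush before validating */
	mmio_write32(hi, addr_hi_valid);	/* upper bits + valid bit */
}

int main(void)
{
	static uint32_t fake_regs[2];	/* stand-in register pair */

	set_filter(&fake_regs[0], &fake_regs[1], 0xaabbccdd, 0x80000011);
	return 0;
}
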
index c116f459945d62455843d4e9262971630dd45099..da4322e4daed5de4fb44f06d8cdb488bc41f6432 100644 (file)
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
        }
 
        itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-       if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+       if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
                netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
                           __func__, itd->sa_idx, xs->xso.offload_handle);
                return 0;
index 3e87dbbc90246dba3a59e3f8ccded5885b441ae2..62e57b05a0aed3d9a02bf8d473aa49505608728f 100644 (file)
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                if (!err)
-                       result = IXGBE_XDP_TX;
+                       result = IXGBE_XDP_REDIR;
                else
                        result = IXGBE_XDP_CONSUMED;
                break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
-
-               xdp_do_flush_map();
        }
 
        u64_stats_update_begin(&rx_ring->syncp);
index cc2f7701e71e1b033c4bd7ceb78c970351f4d9ee..f33fd22b351c856a3544cdd9628a9da500d13abf 100644 (file)
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
-       depends on HAS_DMA
+       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on INET
        select PHYLIB
        select MVMDIO
        ---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
 config MVNETA
        tristate "Marvell Armada 370/38x/XP/37xx network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
 config MVPP2
        tristate "Marvell Armada 375/7K/8K network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -93,7 +91,7 @@ config MVPP2
 
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
        select PHYLIB
        ---help---
index 17a904cc6a5e0fbe538f42ec2b00573e035c2955..0ad2f3f7da85a029b5dea7dd3ce67b69d4ff8605 100644 (file)
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                index = rx_desc - rxq->descs;
                data = rxq->buf_virt_addr[index];
-               phys_addr = rx_desc->buf_phys_addr;
+               phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
index 9f54ccbddea74b57973ee724acf360fa23434a3e..3360f7b9ee73bdb32957472299a3438e8189f7bd 100644 (file)
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 {
        const struct mlx4_en_frag_info *frag_info = priv->frag_info;
        unsigned int truesize = 0;
+       bool release = true;
        int nr, frag_size;
        struct page *page;
        dma_addr_t dma;
-       bool release;
 
        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                        release = page_count(page) != 1 ||
                                  page_is_pfmemalloc(page) ||
                                  page_to_nid(page) != numa_mem_id();
-               } else {
+               } else if (!priv->rx_headroom) {
+                       /* rx_headroom for non-XDP setups is always 0.
+                        * When XDP is set, the condition above
+                        * guarantees the page is always released.
+                        */
                        u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
                        frags->page_offset += sz_align;
index 7b1b5ac986d0779db320ad986c13defa5db82949..31bd56727022fe7a3bacc3a09e11691f79f1a974 100644 (file)
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
-       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+       int local_qpn = vhcr->in_modifier & 0xffffff;
 
        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
        if (err)
index 323ffe8bf7e473c49261b7446530a7354e69f954..456f30007ad659e98a197f969edd5611b1728127 100644 (file)
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
        int i;
 
        buf->size = size;
-       buf->npages = 1 << get_order(size);
+       buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
        buf->page_shift = PAGE_SHIFT;
        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                             GFP_KERNEL);
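
1 << get_order(size) rounds the page count up to a power of two, so a buffer just over a page boundary can claim far more pages than it needs; DIV_ROUND_UP(size, PAGE_SIZE) allocates exactly as many as the size requires. A comparison of the two, with the usual macro definitions written out:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages as a power of two, i.e. the get_order() behaviour. */
static unsigned int order_pages(unsigned int size)
{
	unsigned int pages = 1;

	while (pages * PAGE_SIZE < size)
		pages <<= 1;
	return pages;
}

int main(void)
{
	unsigned int sizes[] = { 4096, 12288, 20480 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size=%u  order=%u pages  exact=%u pages\n",
		       sizes[i], order_pages(sizes[i]),
		       DIV_ROUND_UP(sizes[i], PAGE_SIZE));
	return 0;
}
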
index 487388aed98f22cc9ae814fd60d27b48d5105458..384c1fa490811ee651919c139b9cd9e724d4ff81 100644 (file)
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
+       int cmd_mode;
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
+       cmd_mode = cmd->mode;
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
-       if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+       if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-       char outlen_str[8];
+       char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
 
-       outlen_str[7] = 0;
-
        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;
index eb9eb7aa953ae5560db4746569a02f177bcddf13..405236cf0b0434c4d0e5a9c67264156702f0b6d9 100644 (file)
@@ -858,8 +858,6 @@ struct mlx5e_profile {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
-       void    (*netdev_registered_init)(struct mlx5e_priv *priv);
-       void    (*netdev_registered_remove)(struct mlx5e_priv *priv);
        int     max_tc;
 };
 
index 75e4308ba786aeca51bba013031aeee485e96dd9..d258bb6792713e8bc56bfc9be4c4c8b98f45aebe 100644 (file)
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
        HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs.arfs_lock);
        mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-               if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-                       break;
                if (!work_pending(&arfs_rule->arfs_work) &&
                    rps_may_expire_flow(priv->netdev,
                                        arfs_rule->rxq, arfs_rule->flow_id,
                                        arfs_rule->filter_id)) {
                        hlist_del_init(&arfs_rule->hlist);
                        hlist_add_head(&arfs_rule->hlist, &del_list);
+                       if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+                               break;
                }
        }
        spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
 
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
        arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
        if (!arfs_t)
                return -EPROTONOSUPPORT;
index 0a52f31fef377e40ae9f19c091c33aca58bee154..722998d685646ebd2502dd25479a5d51127ac35c 100644 (file)
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-                                   struct ieee_ets *ets)
+                                   struct ieee_ets *ets,
+                                   bool zero_sum_allowed)
 {
        bool have_ets_tc = false;
        int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        if (have_ets_tc && bw_sum != 100) {
-               netdev_err(netdev,
-                          "Failed to validate ETS: BW sum is illegal\n");
+               if (bw_sum || (!bw_sum && !zero_sum_allowed))
+                       netdev_err(netdev,
+                                  "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
        }
        return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;
 
-       err = mlx5e_dbcnl_validate_ets(netdev, ets);
+       err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
        if (err)
                return err;
 
@@ -441,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
        bool is_new;
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+           !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Save the old entry info */
@@ -498,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
        struct mlx5e_priv *priv = netdev_priv(dev);
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+           !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Skip if no dscp app entry */
@@ -642,12 +636,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
                          ets.prio_tc[i]);
        }
 
-       err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-       if (err) {
-               netdev_err(netdev,
-                          "%s, Failed to validate ETS: %d\n", __func__, err);
+       err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+       if (err)
                goto out;
-       }
 
        err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
        if (err) {
@@ -1147,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
        int err;
 
-       err =  mlx5_set_trust_state(priv->mdev, trust_state);
+       err = mlx5_set_trust_state(priv->mdev, trust_state);
        if (err)
                return err;
        priv->dcbx_dp.trust_state = trust_state;
@@ -1173,6 +1164,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
+       priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+
        if (!MLX5_DSCP_SUPPORTED(mdev))
                return 0;
 
index 56c1b6f5593e053d4629b15635bacf1ece9d6a88..c592678ab5f14b884c822ffef6e288abe66f5ba5 100644 (file)
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_activate_channels(&priv->channels);
        netif_tx_start_all_queues(priv->netdev);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_add_sqs_fwd_rules(priv);
 
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        mlx5e_redirect_rqts_to_drop(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_remove_sqs_fwd_rules(priv);
 
        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -3712,7 +3712,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 
        if (!reset) {
                params->sw_mtu = new_mtu;
-               set_mtu_cb(priv);
+               if (set_mtu_cb)
+                       set_mtu_cb(priv);
                netdev->mtu = params->sw_mtu;
                goto out;
        }
@@ -4597,7 +4598,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-       if (MLX5_VPORT_MANAGER(mdev))
+       if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4754,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5e_enable_async_events(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);
 
        if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4789,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);
 
        mlx5e_disable_async_events(priv);
@@ -4972,7 +4973,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       if (MLX5_VPORT_MANAGER(mdev)) {
+       if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
index 57987f6546e8357bdfaeb3e657e0f07fe47d940a..2b8040a3cdbd7c2f74bb854bd8141ba379ea37de 100644 (file)
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
 
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;
 
        rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_eswitch_rep *rep;
 
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+
+       rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;
 
index 0edf4751a8ba2549e380e7ddc27f57b34d49521a..dfbcda0d0e0808f859abc826162ba027cc937f52 100644 (file)
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        else
                actions = flow->nic_attr->action;
 
+       if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+           !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+               return false;
+
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                return modify_header_match_supported(&parse_attr->spec, exts);
 
@@ -1966,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
        struct mlx5_core_dev *fmdev, *pmdev;
-       u16 func_id, peer_id;
+       u64 fsystem_guid, psystem_guid;
 
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
-       peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
 
-       return (func_id == peer_id);
+       return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
index f63dfbcd29fea1efc2237d6dcecdbdd74259e1a0..40dba9e8af9260a3af4aa7e2a0cf5ff0622ee3c5 100644 (file)
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
        int err;
        int i, enabled_events;
 
-       if (!ESW_ALLOWED(esw))
-               return 0;
-
-       if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+       if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
@@ -1698,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        int vport_num;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        esw_info(dev,
@@ -1767,7 +1765,7 @@ abort:
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-       if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+       if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
                return;
 
        esw_info(esw->dev, "cleanup\n");
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        u64 node_guid;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
        struct mlx5_vport *evport;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
@@ -2218,6 +2216,6 @@ free_out:
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-       return esw->mode;
+       return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
index cecd201f0b73ab8a42693a79070c21bcc850d6e4..91f1209886ffdbb37af33ac32369f312296f8bfa 100644 (file)
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EPERM;
 
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
index 49a75d31185ecf25ff93c5f3a9beec6b48be28a1..6ddb2565884d5372ebfbe814baca6279da68e60b 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -1886,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
        if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                if (!fwd_next_prio_supported(ft))
                        return ERR_PTR(-EOPNOTSUPP);
-               if (dest)
+               if (dest_num)
                        return ERR_PTR(-EINVAL);
                mutex_lock(&root->chain_lock);
                next_ft = find_next_chained_ft(prio);
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
index afd9f4fa22f40b70506fafa49f29cf647c22a959..41ad24f0de2cf9d171e586df3b9d167515d3cb03 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-           MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+           MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
index af3bb2f7a5048240d615344770f3c07ca250649b..b7c21eb21a218de91a9b053addbfb0813fa4f74b 100644 (file)
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
                void *ppriv)
 {
        struct mlx5e_priv *priv  = mlx5i_epriv(netdev);
+       u16 max_mtu;
 
        /* priv init */
        priv->mdev        = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
        priv->ppriv       = ppriv;
        mutex_init(&priv->state_lock);
 
+       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+       netdev->mtu = max_mtu;
+
        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               profile->max_nch(mdev), netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);
index 1e062e6b2587eb7217fbeca7260844ee7c2b465a..3f767cde4c1d50cbcd50d2eb670164fc20802983 100644 (file)
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
        struct mlx5_clock *clock = &mdev->clock;
+       u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
+        * The period is calculated as the minimum between the max HW cycle
+        * count (the clock source mask) and the max number of cycles that
+        * can be multiplied by the clock multiplier without the result
+        * exceeding 64 bits.
         */
-       ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+       overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+       overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+       ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
-       do_div(ns, NSEC_PER_SEC / 2 / HZ);
+       do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;
 
        mdev->clock_info_page = alloc_page(GFP_KERNEL);
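The rework above bounds the watchdog period by the smaller of half the counter mask and the largest cycle count that can be multiplied by the clock multiplier without overflowing 64 bits, and then converts that to jiffies once per full period rather than twice. A standalone sketch of the same arithmetic, assuming made-up mult/shift/mask values rather than real device parameters:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250 /* illustrative kernel tick rate */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        /* Illustrative cyclecounter parameters, not real mlx5 values. */
        uint64_t mask  = (1ULL << 41) - 1; /* 41-bit hardware counter */
        uint32_t mult  = 16777216;         /* cycles -> ns multiplier */
        uint32_t shift = 24;               /* ns = (cycles * mult) >> shift */

        /* Largest cycle count whose (cycles * mult) still fits in 63 bits,
         * capped at half the counter mask - the same bound the patch uses. */
        uint64_t overflow_cycles = min_u64((~0ULL >> 1) / mult, mask >> 1);

        uint64_t ns = (overflow_cycles * mult) >> shift; /* cyc2ns, frac ignored */
        uint64_t period_jiffies = ns / (NSEC_PER_SEC / HZ);

        printf("overflow_cycles=%llu ns=%llu period=%llu jiffies\n",
               (unsigned long long)overflow_cycles,
               (unsigned long long)ns,
               (unsigned long long)period_jiffies);
        return 0;
}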
index 7cb67122e8b5f04371651e1c1e2757acb281a36e..98359559c77e4286df95df17651a4b9f2ca8e427 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
        int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        struct mlx5_mpfs *mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        u32 index;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
index fa9d0760dd36ffda5c2c439f12bbdffab6320ccd..31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13 100644 (file)
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                   int inlen)
 {
-       u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                     int outlen)
 {
-       u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
index 2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 (file)
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return -EBUSY;
        }
 
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               goto enable_vfs_hca;
+
        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return err;
        }
 
+enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 out:
-       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (MLX5_ESWITCH_MANAGER(dev))
+               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
index 719cecb182c6c4eb5579eb1b36601acb6c0d0c5c..7eecd5b07bb1931bf3041b1ae12b0f3f5154405a 100644 (file)
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
-       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -EOPNOTSUPP;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
index b97bb72b4db45fde2e4687d6e517f41b8a1ad078..86478a6b99c5068e13688f2556e954ee3b3f9486 100644 (file)
@@ -113,35 +113,45 @@ err_db_free:
        return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-                                 struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+                                struct mlx5_wq_qp *qp)
 {
+       struct mlx5_frag_buf_ctrl *sq_fbc;
        struct mlx5_frag_buf *rqb, *sqb;
 
-       rqb = &qp->rq.fbc.frag_buf;
+       rqb  = &qp->rq.fbc.frag_buf;
        *rqb = *buf;
        rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       rqb->npages = 1 << get_order(rqb->size);
+       rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-       sqb = &qp->sq.fbc.frag_buf;
-       *sqb = *buf;
-       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       sqb->npages = 1 << get_order(sqb->size);
+       sq_fbc = &qp->sq.fbc;
+       sqb    = &sq_fbc->frag_buf;
+       *sqb   = *buf;
+       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+       sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
        sqb->frags += rqb->npages; /* first part is for the rq */
+       if (sq_fbc->strides_offset)
+               sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u32 sq_strides_offset;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
-       mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-                     MLX5_GET(qpc, qpc, log_sq_size),
-                     &wq->sq.fbc);
+
+       sq_strides_offset =
+               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+       mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+                            MLX5_GET(qpc, qpc, log_sq_size),
+                            sq_strides_offset,
+                            &wq->sq.fbc);
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                goto err_db_free;
        }
 
-       mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+       mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
        wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
        wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
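Two fixes are visible above: the SQ is now sized from qp->sq (the old code reused qp->rq), and npages is an exact DIV_ROUND_UP page count instead of the power-of-two 1 << get_order(). A small userspace sketch of the page-count difference, with a simplified stand-in for the kernel's get_order():

#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Rough model of get_order(): smallest order with 2^order pages >= size. */
static int get_order(unsigned long size)
{
        int order = 0;
        unsigned long pages = DIV_ROUND_UP(size, PAGE_SIZE);

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 3 * PAGE_SIZE; /* e.g. a 12 KiB work queue */

        printf("1 << get_order = %lu pages\n", 1UL << get_order(size)); /* 4 */
        printf("DIV_ROUND_UP   = %lu pages\n",
               (unsigned long)DIV_ROUND_UP(size, PAGE_SIZE));           /* 3 */
        return 0;
}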
index f4d9c9975ac3d857f50ef255756ea23a7a11fdb5..82827a8d3d67cac73ac3f6c232e3f750553deddc 100644 (file)
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
 
 config MLXSW_PCI
        tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-       depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+       depends on PCI && HAS_IOMEM && MLXSW_CORE
        default m
        ---help---
          This is PCI bus implementation for Mellanox Technologies Switch ASICs.
index 3c0d882ba18380ae6d2f63d77f52163a603f1e47..f6f6a568d66a5a55cac67176c4c9a5ee01c11a90 100644 (file)
@@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
        list_add(&resource->list, &block->resource_list);
 }
 
+static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
+{
+       list_del(&resource->list);
+}
+
 static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
 {
        struct mlxsw_afa_resource *resource, *tmp;
 
        list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
-               list_del(&resource->list);
                resource->destructor(block, resource);
        }
 }
@@ -530,6 +534,7 @@ static void
 mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
                                struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
 {
+       mlxsw_afa_resource_del(&fwd_entry_ref->resource);
        mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
        kfree(fwd_entry_ref);
 }
@@ -579,6 +584,7 @@ static void
 mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
                          struct mlxsw_afa_counter *counter)
 {
+       mlxsw_afa_resource_del(&counter->resource);
        block->afa->ops->counter_index_put(block->afa->ops_priv,
                                           counter->counter_index);
        kfree(counter);
@@ -626,8 +632,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
        char *oneact;
        char *actions;
 
-       if (WARN_ON(block->finished))
-               return NULL;
+       if (block->finished)
+               return ERR_PTR(-EINVAL);
        if (block->cur_act_index + action_size >
            block->afa->max_acts_per_set) {
                struct mlxsw_afa_set *set;
@@ -637,7 +643,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
                 */
                set = mlxsw_afa_set_create(false);
                if (!set)
-                       return NULL;
+                       return ERR_PTR(-ENOBUFS);
                set->prev = block->cur_set;
                block->cur_act_index = 0;
                block->cur_set->next = set;
@@ -724,8 +730,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
                                                  MLXSW_AFA_VLAN_CODE,
                                                  MLXSW_AFA_VLAN_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
@@ -806,8 +812,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
        return 0;
@@ -820,8 +826,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
                                trap_id);
@@ -836,8 +842,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
                                trap_id);
@@ -856,6 +862,7 @@ static void
 mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
                         struct mlxsw_afa_mirror *mirror)
 {
+       mlxsw_afa_resource_del(&mirror->resource);
        block->afa->ops->mirror_del(block->afa->ops_priv,
                                    mirror->local_in_port,
                                    mirror->span_id,
@@ -908,8 +915,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
        mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -996,8 +1003,8 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
 
        act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
                                            MLXSW_AFA_FORWARD_SIZE);
-       if (!act) {
-               err = -ENOBUFS;
+       if (IS_ERR(act)) {
+               err = PTR_ERR(act);
                goto err_append_action;
        }
        mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1052,8 +1059,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
 {
        char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
                                                  MLXSW_AFA_POLCNT_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
                              counter_index);
        return 0;
@@ -1123,8 +1130,8 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VIRFWD_CODE,
                                                  MLXSW_AFA_VIRFWD_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
        return 0;
 }
@@ -1193,8 +1200,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_MCROUTER_CODE,
                                                  MLXSW_AFA_MCROUTER_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
                                expected_irif, min_mtu, rmid_valid, kvdl_index);
        return 0;
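Returning ERR_PTR() instead of NULL lets callers distinguish appending to an already-finished block (-EINVAL) from failing to allocate a new action set (-ENOBUFS). A simplified userspace model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers and the resulting caller pattern (the append helper here is a toy stand-in, not the mlxsw function):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static char buffer[64];

static char *append_action(int finished, int room)
{
        if (finished)
                return ERR_PTR(-EINVAL);  /* block already sealed */
        if (!room)
                return ERR_PTR(-ENOBUFS); /* no space for another set */
        return buffer;
}

int main(void)
{
        char *act = append_action(1, 1);

        if (IS_ERR(act))
                printf("append failed: %ld\n", PTR_ERR(act)); /* -22 (EINVAL) */
        return 0;
}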
index 6aaaf3d9ba31d9538d9307caa0450a848bf6b091..77b2adb293415a9de16caaabbd203b397cd12a4a 100644 (file)
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
        kfree(mlxsw_sp_rt6);
 }
 
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+       /* RTF_CACHE routes are ignored */
+       return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-                                const struct fib6_info *nrt, bool append)
+                                const struct fib6_info *nrt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
 
-       if (!append)
+       if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
                return NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        break;
                if (rt->fib6_metric < nrt->fib6_metric)
                        continue;
-               if (rt->fib6_metric == nrt->fib6_metric)
+               if (rt->fib6_metric == nrt->fib6_metric &&
+                   mlxsw_sp_fib6_rt_can_mp(rt))
                        return fib6_entry;
                if (rt->fib6_metric > nrt->fib6_metric)
                        break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                              const struct fib6_info *nrt, bool replace)
 {
-       struct mlxsw_sp_fib6_entry *fib6_entry;
+       struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
                struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        continue;
                if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
                        break;
-               if (replace && rt->fib6_metric == nrt->fib6_metric)
-                       return fib6_entry;
+               if (replace && rt->fib6_metric == nrt->fib6_metric) {
+                       if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+                           mlxsw_sp_fib6_rt_can_mp(nrt))
+                               return fib6_entry;
+                       if (mlxsw_sp_fib6_rt_can_mp(nrt))
+                               fallback = fallback ?: fib6_entry;
+               }
                if (rt->fib6_metric > nrt->fib6_metric)
-                       return fib6_entry;
+                       return fallback ?: fib6_entry;
        }
 
-       return NULL;
+       return fallback;
 }
 
 static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-                                   struct fib6_info *rt, bool replace,
-                                   bool append)
+                                   struct fib6_info *rt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
        struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
        /* Before creating a new entry, try to append route to an existing
         * multipath entry.
         */
-       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
        if (fib6_entry) {
                err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
                if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
                return 0;
        }
 
-       /* We received an append event, yet did not find any route to
-        * append to.
-        */
-       if (WARN_ON(append)) {
-               err = -EINVAL;
-               goto err_fib6_entry_append;
-       }
-
        fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
        if (IS_ERR(fib6_entry)) {
                err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
        mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
-err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
        mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
        return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        struct mlxsw_sp_fib_event_work *fib_work =
                container_of(work, struct mlxsw_sp_fib_event_work, work);
        struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-       bool replace, append;
+       bool replace;
        int err;
 
        rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        case FIB_EVENT_ENTRY_APPEND: /* fall through */
        case FIB_EVENT_ENTRY_ADD:
                replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
-               append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
                err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-                                              fib_work->fen6_info.rt, replace,
-                                              append);
+                                              fib_work->fen6_info.rt, replace);
                if (err)
                        mlxsw_sp_router_fib_abort(mlxsw_sp);
                mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
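The fallback selection in mlxsw_sp_fib6_node_entry_find() above relies on the GNU "a ?: b" conditional, which yields a when it is non-NULL and only then falls back to b - convenient for remembering the first suitable candidate found during a scan. A tiny sketch (compiles with GCC/Clang extensions enabled):

#include <stdio.h>

int main(void)
{
        const char *fallback = NULL;
        const char *candidates[] = { "first", "second", "third" };

        for (int i = 0; i < 3; i++)
                fallback = fallback ?: candidates[i];

        printf("%s\n", fallback); /* "first" - later matches don't replace it */
        return 0;
}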
index fb2c8f8071e64d3b6d52865ecaddf17f841a2b9d..776a8a9be8e3551311f5a99ba0285c4c698cf10a 100644 (file)
@@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev)
 static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
 {
        ifh[0] = IFH_INJ_BYPASS;
-       ifh[1] = (0xff00 & info->port) >> 8;
+       ifh[1] = (0xf00 & info->port) >> 8;
        ifh[2] = (0xff & info->port) << 24;
-       ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
-                (info->tag_type << 16) | info->vid;
+       ifh[3] = (info->tag_type << 16) | info->vid;
 
        return 0;
 }
@@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
                         QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
 
        info.port = BIT(port->chip_port);
-       info.cpuq = 0xff;
+       info.tag_type = IFH_TAG_TYPE_C;
+       info.vid = skb_vlan_tag_get(skb);
        ocelot_gen_ifh(ifh, &info);
 
        for (i = 0; i < IFH_LEN; i++)
-               ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                QS_INJ_WR, grp);
 
        count = (skb->len + 3) / 4;
        last = skb->len % 4;
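The corrected mask splits the 12-bit port field across two header words: bits 11:8 go into ifh[1] (hence 0xf00, not 0xff00) and bits 7:0 into the top byte of ifh[2]. A standalone sketch of that packing, using an illustrative one-hot port value rather than real chip state:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t port = 1u << 10; /* illustrative one-hot port mask (bit 10) */
        uint32_t ifh[4] = { 0 };

        ifh[1] = (0xf00 & port) >> 8; /* port bits 11:8 -> low nibble */
        ifh[2] = (0xff & port) << 24; /* port bits  7:0 -> top byte   */

        printf("ifh[1]=0x%08x ifh[2]=0x%08x\n", ifh[1], ifh[2]);
        /* ifh[1]=0x00000004 ifh[2]=0x00000000 */
        return 0;
}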
index fcdfb8e7fdeab0b9dcb353f4cd4a7d76370c9817..40216d56dddcb73d997ed4e4c48e63868610da89 100644 (file)
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
        ret = nfp_net_bpf_offload(nn, prog, running, extack);
        /* Stop offload if replace not possible */
-       if (ret && prog)
-               nfp_bpf_xdp_offload(app, nn, NULL, extack);
+       if (ret)
+               return ret;
 
-       nn->dp.bpf_offload_xdp = prog && !ret;
+       nn->dp.bpf_offload_xdp = !!prog;
        return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 1decf3a1cad34cf8b8bbe50ae2fe268fc9775049..e57d23746585f7abe1d7d52e0045fde2b2839852 100644 (file)
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
                        return NFP_REPR_TYPE_VF;
        }
 
-       return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
+       return __NFP_REPR_TYPE_MAX;
 }
 
 static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
        u8 port = 0;
 
        repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+       if (repr_type > NFP_REPR_TYPE_MAX)
+               return NULL;
 
        reprs = rcu_dereference(app->reprs[repr_type]);
        if (!reprs)
index 91935405f5861678077c188328d365ed5cb2ba7f..84f7a5dbea9d5bf17abd88416cc5a41f2fa4770b 100644 (file)
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                         NFP_FLOWER_MASK_MPLS_Q;
 
                frame->mpls_lse = cpu_to_be32(t_mpls);
+       } else if (dissector_uses_key(flow->dissector,
+                                     FLOW_DISSECTOR_KEY_BASIC)) {
+               /* Check for an MPLS ether type and set the
+                * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an MPLS ether
+                * type without any MPLS fields.
+                */
+               struct flow_dissector_key_basic *key_basic;
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+               if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                   key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                       frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
 }
 
index c42e64f32333f84640ff913b61ff199701e1b404..525057bee0ed8978f360d6eeb8293d8a990a0f22 100644 (file)
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                               key_layer |= NFP_FLOWER_LAYER_MAC;
+                               key_size += sizeof(struct nfp_flower_mac_mpls);
+                       }
+                       break;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 78afe75129ab5b7a852d5bffa40f5daa9b1c76d1..382bb93cb0900f7e83e0cc34277379c3b36468bd 100644 (file)
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
        payload.dst_ipv4 = flow->daddr;
 
        /* If entry has expired send dst IP with all other fields 0. */
-       if (!(neigh->nud_state & NUD_VALID)) {
+       if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
index 46b76d5a726c6ade2c48c000172a3d9ba9db7253..152283d7e59c8f4a7a69b45f520a7c9625e9ce16 100644 (file)
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
                return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
 
        pf->limit_vfs = ~0;
-       pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
        /* Allow any setting for backwards compatibility if symbol not found */
        if (err == -ENOENT)
                return 0;
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
        err = nfp_net_pci_probe(pf);
        if (err)
-               goto err_sriov_unlimit;
+               goto err_fw_unload;
 
        err = nfp_hwmon_register(pf);
        if (err) {
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 err_net_remove:
        nfp_net_pci_remove(pf);
-err_sriov_unlimit:
-       pci_sriov_set_totalvfs(pf->pdev, 0);
 err_fw_unload:
        kfree(pf->rtbl);
        nfp_mip_close(pf->mip);
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
        nfp_hwmon_unregister(pf);
 
        nfp_pcie_sriov_disable(pdev);
-       pci_sriov_set_totalvfs(pf->pdev, 0);
 
        nfp_net_pci_remove(pf);
 
index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 (file)
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
-       if (err < sizeof(*fwinf))
+       if (err < (int)sizeof(*fwinf))
                goto err_release;
 
        if (!nffw_res_flg_init_get(fwinf))
index 00db3401b89852a7fe5eaca7342344bcb3b66d4d..1dfaccd151f0d457a2ce38447400925113ae546d 100644 (file)
@@ -502,6 +502,7 @@ enum BAR_ID {
 struct qed_nvm_image_info {
        u32 num_images;
        struct bist_nvm_image_att *image_att;
+       bool valid;
 };
 
 #define DRV_MODULE_VERSION                   \
index 8f31406ec89407713b2ad32c81a30185b2c05727..e0680ce9132815568914dff86606363b9a02cb88 100644 (file)
@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
                *type = DCBX_PROTOCOL_ROCE_V2;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
-               DP_ERR(p_hwfn,
-                      "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
-                      id, app_prio_bitmap);
+               DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+                      app_prio_bitmap);
                return false;
        }
 
@@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
        p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-              ARRAY_SIZE(p_local->local_chassis_id));
+              sizeof(p_local->local_chassis_id));
        memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-              ARRAY_SIZE(p_local->local_port_id));
+              sizeof(p_local->local_port_id));
 }
 
 static void
@@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
        p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-              ARRAY_SIZE(p_remote->peer_chassis_id));
+              sizeof(p_remote->peer_chassis_id));
        memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-              ARRAY_SIZE(p_remote->peer_port_id));
+              sizeof(p_remote->peer_port_id));
 }
 
 static int
@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
                *cap = 0x80;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-                       DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+               *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+                       DCB_CAP_DCBX_STATIC);
                break;
        default:
                *cap = false;
@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
        if (!dcbx_info)
                return 0;
 
-       if (dcbx_info->operational.enabled)
-               mode |= DCB_CAP_DCBX_LLD_MANAGED;
        if (dcbx_info->operational.ieee)
                mode |= DCB_CAP_DCBX_VER_IEEE;
        if (dcbx_info->operational.cee)
index a14e484890299565ee8fdac8851ed9d7f3e90437..4340c4c90bcbe8b03e5373cfc674c8840ff640d9 100644 (file)
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
                format_idx = header & MFW_TRACE_EVENTID_MASK;
 
                /* Skip message if its index doesn't exist in the meta data */
-               if (format_idx > s_mcp_trace_meta.formats_num) {
+               if (format_idx >= s_mcp_trace_meta.formats_num) {
                        u8 format_size =
                                (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
                                     MFW_TRACE_PRM_SIZE_SHIFT);
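The one-character fix above tightens a bounds check: with formats_num entries, valid indices run from 0 through formats_num - 1, so the old ">" test let format_idx == formats_num read one element past the array. A minimal illustration:

#include <stdio.h>

int main(void)
{
        int formats_num = 4; /* array holds indices 0..3 */
        int format_idx = 4;  /* off-the-end index from a trace header */

        if (format_idx > formats_num) /* old check: 4 > 4 is false... */
                printf("old check skips\n");
        else
                printf("old check would read formats[4]!\n");

        if (format_idx >= formats_num) /* fixed check rejects it */
                printf("new check skips\n");
        return 0;
}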
index 329781cda77fbecc88328ea95f00e39d4be5db9b..e5249b4741d03f7c347c70a861288b787653741a 100644 (file)
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
 
                rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-                                              QED_OV_ESWITCH_VEB);
+                                              QED_OV_ESWITCH_NONE);
                if (rc)
                        DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
index 99973e10b17977561be6536fee84cf2901622c3e..5ede6408649d66c25c85a25f0c9337feeb428670 100644 (file)
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 
        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-               u32 *p_bins = (u32 *)p_params->bins;
+               u32 *p_bins = p_params->bins;
 
                p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
 {
-       unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct vport_update_ramrod_data *p_ramrod = NULL;
+       u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
        /* explicitly clear out the entire vector */
        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));
-       memset(bins, 0, sizeof(unsigned long) *
-              ETH_MULTICAST_MAC_BINS_IN_REGS);
+       memset(bins, 0, sizeof(bins));
        /* filter ADD op is explicit set op and it removes
         *  any existing filters for the vport
         */
        if (p_filter_cmd->opcode == QED_FILTER_ADD) {
                for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-                       u32 bit;
+                       u32 bit, nbits;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, bins);
+                       nbits = sizeof(u32) * BITS_PER_BYTE;
+                       bins[bit / nbits] |= 1 << (bit % nbits);
                }
 
                /* Convert to correct endianity */
                for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                        struct vport_update_ramrod_mcast *p_ramrod_bins;
-                       u32 *p_bins = (u32 *)bins;
 
                        p_ramrod_bins = &p_ramrod->approx_mcast;
-                       p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+                       p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
                }
        }
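Open-coding the bin update as bins[bit / 32] |= 1 << (bit % 32) pins the layout to 32-bit words, whereas __set_bit() operates on unsigned long and therefore on 64-bit words on 64-bit kernels - a mismatch with the 32-bit values the firmware interface expects. A standalone sketch of the fixed-width update:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

static void set_bin(uint32_t *bins, unsigned int bit)
{
        unsigned int nbits = sizeof(uint32_t) * BITS_PER_BYTE; /* 32 */

        bins[bit / nbits] |= 1u << (bit % nbits);
}

int main(void)
{
        uint32_t bins[8] = { 0 }; /* 256 multicast approximation bins */

        set_bin(bins, 40); /* lands in bins[1], bit 8 - same on any arch */
        printf("bins[1]=0x%08x\n", bins[1]); /* 0x00000100 */

        /* With unsigned long bins[] on a 64-bit kernel, __set_bit(40, ...)
         * would touch 64-bit word 0 instead, so a (u32 *) view of the
         * array no longer matches what the device expects. */
        return 0;
}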
 
index 806a8da257e9a48cd553c6d173fc69300917391e..8d80f1095d171c85b7d010bb5297a58a1961808d 100644 (file)
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
        u8                              anti_spoofing_en;
        u8                              update_accept_any_vlan_flg;
        u8                              accept_any_vlan;
-       unsigned long                   bins[8];
+       u32                             bins[8];
        struct qed_rss_params           *rss_params;
        struct qed_filter_accept_flags  accept_flags;
        struct qed_sge_tpa_params       *sge_tpa_params;
index c97ebd681c471196cb4135deafbf8e07efc9d615..012973d75ad039436fb0007e9452eb0565f4938c 100644 (file)
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 
        skb = build_skb(buffer->data, 0);
        if (!skb) {
-               rc = -ENOMEM;
-               goto out_post;
+               DP_INFO(cdev, "Failed to build SKB\n");
+               kfree(buffer->data);
+               goto out_post1;
        }
 
        data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
                                      data->opaque_data_0,
                                      data->opaque_data_1);
+       } else {
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+                                   QED_MSG_LL2 | QED_MSG_STORAGE),
+                          "Dropping the packet\n");
+               kfree(buffer->data);
        }
 
+out_post1:
        /* Update Buffer information and update FW producer */
        buffer->data = new_data;
        buffer->phys_addr = new_phys_addr;
index b04d57ca5176ee65f348bb5882965e19f107e2f8..758a9a5127fa8c00566e4f90d5f75db636570e33 100644 (file)
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
                goto err2;
        }
 
-       DP_INFO(cdev, "qed_probe completed successffuly\n");
+       DP_INFO(cdev, "qed_probe completed successfully\n");
 
        return cdev;
 
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
-                               hwfn->simd_proto_handler[j].func(
-                                       hwfn->simd_proto_handler[j].token);
+                               struct qed_simd_fp_handler *p_handler =
+                                       &hwfn->simd_proto_handler[j];
+
+                               if (p_handler->func)
+                                       p_handler->func(p_handler->token);
+                               else
+                                       DP_NOTICE(hwfn,
+                                                 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+                                                 j, status);
+
                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+       if (is_kdump_kernel()) {
+               DP_INFO(cdev,
+                       "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+                       cdev->int_params.in.min_msix_cnt);
+               cdev->int_params.in.num_vectors =
+                       cdev->int_params.in.min_msix_cnt;
+       }
+
        rc = qed_set_int_mode(cdev, false);
        if (rc)  {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
index 4e0b443c9519d67bc3b888ddf3b341c93291e328..cdd645024a32aadc40f54c4e02a88988898ce219 100644 (file)
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;
 
+       /* nvm_info needs to be updated */
+       p_hwfn->nvm_info.valid = false;
+
        return 0;
 }
 
@@ -1208,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                break;
        default:
                p_link->speed = 0;
+               p_link->link_up = 0;
        }
 
        if (p_link->link_up && p_link->speed)
@@ -1305,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;
-       if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
-               if (params->eee.enable)
-                       phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+       /* There are MFWs that advertise this capability regardless of
+        * whether it is actually feasible. And since, at the very least,
+        * adv_caps would be set internally by qed, we want to make sure
+        * LFA still works.
+        */
+       if ((p_hwfn->mcp_info->capabilities &
+            FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+               phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
                if (params->eee.tx_lpi_enable)
                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
                if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -2555,11 +2565,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 {
-       struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+       struct qed_nvm_image_info nvm_info;
        struct qed_ptt *p_ptt;
        int rc;
        u32 i;
 
+       if (p_hwfn->nvm_info.valid)
+               return 0;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2580,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
        }
 
        /* Acquire from MFW the amount of available images */
-       nvm_info->num_images = 0;
+       nvm_info.num_images = 0;
        rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
-                                            p_ptt, &nvm_info->num_images);
+                                            p_ptt, &nvm_info.num_images);
        if (rc == -EOPNOTSUPP) {
                DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
                goto out;
-       } else if (rc || !nvm_info->num_images) {
+       } else if (rc || !nvm_info.num_images) {
                DP_ERR(p_hwfn, "Failed getting number of images\n");
                goto err0;
        }
 
-       nvm_info->image_att = kmalloc_array(nvm_info->num_images,
-                                           sizeof(struct bist_nvm_image_att),
-                                           GFP_KERNEL);
-       if (!nvm_info->image_att) {
+       nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+                                          sizeof(struct bist_nvm_image_att),
+                                          GFP_KERNEL);
+       if (!nvm_info.image_att) {
                rc = -ENOMEM;
                goto err0;
        }
 
        /* Iterate over images and get their attributes */
-       for (i = 0; i < nvm_info->num_images; i++) {
+       for (i = 0; i < nvm_info.num_images; i++) {
                rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
-                                                   &nvm_info->image_att[i], i);
+                                                   &nvm_info.image_att[i], i);
                if (rc) {
                        DP_ERR(p_hwfn,
                               "Failed getting image index %d attributes\n", i);
@@ -2597,14 +2610,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
                }
 
                DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
-                          nvm_info->image_att[i].len);
+                          nvm_info.image_att[i].len);
        }
 out:
+       /* Update hwfn's nvm_info */
+       if (nvm_info.num_images) {
+               p_hwfn->nvm_info.num_images = nvm_info.num_images;
+               kfree(p_hwfn->nvm_info.image_att);
+               p_hwfn->nvm_info.image_att = nvm_info.image_att;
+               p_hwfn->nvm_info.valid = true;
+       }
+
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;
 
 err1:
-       kfree(nvm_info->image_att);
+       kfree(nvm_info.image_att);
 err0:
        qed_ptt_release(p_hwfn, p_ptt);
        return rc;
@@ -2641,6 +2662,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       qed_mcp_nvm_info_populate(p_hwfn);
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
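The valid flag turns nvm_info into a lazily populated cache: it is filled on first use, reused while valid, and invalidated by qed_mcp_nvm_wr_cmd() so the next reader repopulates it. A compact userspace model of that cache-with-invalidate pattern (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct nvm_cache {
        bool valid;
        int num_images; /* stands in for the image attribute table */
};

static void populate(struct nvm_cache *c)
{
        if (c->valid)
                return;      /* cheap fast path, no device access */

        c->num_images = 3;   /* pretend this came from the MFW */
        c->valid = true;
        printf("repopulated from device\n");
}

static void nvm_write(struct nvm_cache *c)
{
        c->valid = false;    /* contents may have changed */
}

int main(void)
{
        struct nvm_cache cache = { 0 };

        populate(&cache);  /* queries the device */
        populate(&cache);  /* served from cache */
        nvm_write(&cache); /* invalidates */
        populate(&cache);  /* queries again */
        return 0;
}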
index f01bf52bc381f6f02c33ee3d9df4a90982cf8245..26e918d7f2f9c0603ab6b0f2f132daba6d7bcc3b 100644 (file)
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
 
        p_data->update_approx_mcast_flg = 1;
        memcpy(p_data->bins, p_mcast_tlv->bins,
-              sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+              sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
 }
 
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
        struct qed_iov_vf_init_params params;
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *hwfn = &cdev->hwfns[j];
-               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               hwfn = &cdev->hwfns[j];
+               ptt = qed_ptt_acquire(hwfn);
 
                /* Make sure not to use more than 16 queues per VF */
                params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                goto err;
        }
 
+       hwfn = QED_LEADING_HWFN(cdev);
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_ERR(hwfn, "Failed to acquire ptt\n");
+               rc = -EBUSY;
+               goto err;
+       }
+
+       rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+       if (rc)
+               DP_INFO(cdev, "Failed to update eswitch mode\n");
+       qed_ptt_release(hwfn, ptt);
+
        return num;
 
 err:
index 2d7fcd6a0777aa264b8e228d14eae3cc0e2d212d..be6ddde1a104ff34050ee72b7dc5bc40658e6c2b 100644 (file)
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                resp_size += sizeof(struct pfvf_def_resp_tlv);
 
                memcpy(p_mcast_tlv->bins, p_params->bins,
-                      sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+                      sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        }
 
        update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
                        u32 bit;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, sp_params.bins);
+                       sp_params.bins[bit / 32] |= 1 << (bit % 32);
                }
        }
 
index 4f05d5eb3cf50ae51298a1711f2a850bcac1fe93..033409db86ae7bbbe63b79f80490bd2857abfe7c 100644 (file)
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
        struct channel_tlv tl;
        u8 padding[4];
 
-       u64 bins[8];
+       /* There are only 256 approx bins, and in the HSI they're divided into
+        * 32-bit values. As old VFs only ever set bits 0-255 on their side,
+        * the upper half of the array is never expected to contain any data.
+        */
+       u64 bins[4];
+       u64 obsolete_bins[4];
 };
 
 struct vfpf_vport_update_accept_param_tlv {
index 02adb513f4756cb58c423936213bdcb4158d1dfa..013ff567283c738f342ca5d6f5358e30ca6daa72 100644 (file)
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
        struct qede_ptp *ptp = edev->ptp;
 
-       if (!ptp)
-               return -EIO;
+       if (!ptp) {
+               info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+
+               return 0;
+       }
 
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
index 891f03a7a33dc7286b5bb6d1b4ac2333ab74aacf..8d7b9bb910f2addae4712088884b334c42876934 100644 (file)
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        ret = kstrtoul(buf, 16, &data);
+       if (ret)
+               return ret;
 
        switch (data) {
        case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
index 5803cd6db406c7f9c5426ceb87bf062d4f0434fb..206f0266463e362a0e34fe8ff5b626519500e2ed 100644 (file)
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
                return ret;
        }
 
-       netif_start_queue(qca->net_dev);
+       /* SPI thread takes care of TX queue */
 
        return 0;
 }
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
        qca->net_dev->stats.tx_errors++;
        /* Trigger tx queue flush and QCA7000 reset */
        qca->sync = QCASPI_SYNC_UNKNOWN;
+
+       if (qca->spi_thread)
+               wake_up_process(qca->spi_thread);
 }
 
 static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
 
        if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
            (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-               dev_info(&spi->dev, "Invalid clkspeed: %d\n",
-                        qcaspi_clkspeed);
+               dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+                       qcaspi_clkspeed);
                return -EINVAL;
        }
 
        if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
            (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-               dev_info(&spi->dev, "Invalid burst len: %d\n",
-                        qcaspi_burst_len);
+               dev_err(&spi->dev, "Invalid burst len: %d\n",
+                       qcaspi_burst_len);
                return -EINVAL;
        }
 
        if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
            (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-               dev_info(&spi->dev, "Invalid pluggable: %d\n",
-                        qcaspi_pluggable);
+               dev_err(&spi->dev, "Invalid pluggable: %d\n",
+                       qcaspi_pluggable);
                return -EINVAL;
        }
 
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
        }
 
        if (register_netdev(qcaspi_devs)) {
-               dev_info(&spi->dev, "Unable to register net device %s\n",
-                        qcaspi_devs->name);
+               dev_err(&spi->dev, "Unable to register net device %s\n",
+                       qcaspi_devs->name);
                free_netdev(qcaspi_devs);
                return -EFAULT;
        }
index 75dfac0248f45cb423fd9883e38349a456b1dc0d..eaedc11ed686796b6246bf517bf7691aef43929c 100644 (file)
@@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
 }
 #endif
 
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return rc;
        }
 
-       /* override BIOS settings, use userspace tools to enable WOL */
-       __rtl8169_set_wol(tp, 0);
+       tp->saved_wolopts = __rtl8169_get_wol(tp);
 
        if (rtl_tbi_enabled(tp)) {
                tp->set_speed = rtl8169_set_speed_tbi;
@@ -7789,6 +7788,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        tp->cp_cmd |= RxChkSum | RxVlan;
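The netpoll fix above passes tp - the cookie the interrupt handler was registered with and casts back to the private struct - instead of dev. A tiny sketch of why an IRQ-style callback must receive exactly the pointer it was registered with:

#include <stdio.h>

struct private { int irq_events; };

/* IRQ-style handler: the void * must be whatever was registered. */
static void interrupt(int irq, void *dev_instance)
{
        struct private *tp = dev_instance; /* cast back, no type check */

        tp->irq_events++;
        (void)irq;
}

int main(void)
{
        struct private tp = { 0 };

        /* Registration and every later invocation must agree on the cookie;
         * passing some other object here would be silently misinterpreted. */
        interrupt(0, &tp);
        printf("events=%d\n", tp.irq_events); /* 1 */
        return 0;
}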
 
index 27be51f0a421b43e191e594bdb6ebcd753b65eef..f3f7477043ce106155ca30ba7c07fb7d20e968bc 100644 (file)
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
        select CRC32
        select MII
@@ -31,7 +30,6 @@ config SH_ETH
 
 config RAVB
        tristate "Renesas Ethernet AVB support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || COMPILE_TEST
        select CRC32
        select MII
index 68f122140966d4de381b47fa192246eb7606707a..0d811c02ff340f09a385ec0677f0388034615eef 100644 (file)
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
        bool new_state = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable TX and RX right away if the E-MAC link change is ignored */
+       if (priv->no_avb_link)
+               ravb_rcv_snd_disable(ndev);
 
        if (phydev->link) {
                if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
                        ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
-                       if (priv->no_avb_link)
-                               ravb_rcv_snd_enable(ndev);
                }
        } else if (priv->link) {
                new_state = true;
                priv->link = 0;
                priv->speed = 0;
                priv->duplex = -1;
-               if (priv->no_avb_link)
-                       ravb_rcv_snd_disable(ndev);
        }
 
+       /* Enable TX and RX right away if the E-MAC link change is ignored */
+       if (priv->no_avb_link && phydev->link)
+               ravb_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&priv->lock, flags);
+
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 }
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int ravb_get_link_ksettings(struct net_device *ndev,
-                                  struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-static int ravb_set_link_ksettings(struct net_device *ndev,
-                                  const struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int error;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable TX and RX */
-       ravb_rcv_snd_disable(ndev);
-
-       error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (error)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               priv->duplex = 1;
-       else
-               priv->duplex = 0;
-
-       ravb_set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* Enable TX and RX */
-       ravb_rcv_snd_enable(ndev);
-
-       mmiowb();
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return error;
-}
-
-static int ravb_nway_reset(struct net_device *ndev)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       int error = -ENODEV;
-       unsigned long flags;
-
-       if (ndev->phydev) {
-               spin_lock_irqsave(&priv->lock, flags);
-               error = phy_start_aneg(ndev->phydev);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       return error;
-}
-
 static u32 ravb_get_msglevel(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 }
 
 static const struct ethtool_ops ravb_ethtool_ops = {
-       .nway_reset             = ravb_nway_reset,
+       .nway_reset             = phy_ethtool_nway_reset,
        .get_msglevel           = ravb_get_msglevel,
        .set_msglevel           = ravb_set_msglevel,
        .get_link               = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
        .get_ringparam          = ravb_get_ringparam,
        .set_ringparam          = ravb_set_ringparam,
        .get_ts_info            = ravb_get_ts_info,
-       .get_link_ksettings     = ravb_get_link_ksettings,
-       .set_link_ksettings     = ravb_set_link_ksettings,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_wol                = ravb_get_wol,
        .set_wol                = ravb_set_wol,
 };
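
With the locking handled in adjust_link, the driver-private ksettings/nway handlers deleted above had nothing left to add over phylib, so the ops table can point straight at the stock helpers. A sketch of the minimal wiring for a hypothetical driver (foo_ethtool_ops is made up; the phy_ethtool_* helpers are the real phylib entry points used above, operating on ndev->phydev):

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Hypothetical ops table: every slot filled by a generic helper, so no
 * driver glue code is needed. */
static const struct ethtool_ops foo_ethtool_ops = {
        .nway_reset             = phy_ethtool_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};
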
index e9007b613f17ca8de16b67e054df42a800522fb5..5614fd231bbe1e4685582e15faf27dad412b241b 100644 (file)
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
+       unsigned long flags;
        int new_state = 0;
 
+       spin_lock_irqsave(&mdp->lock, flags);
+
+       /* Disable TX and RX up front if the E-MAC link change is ignored */
+       if (mdp->cd->no_psr || mdp->no_ether_link)
+               sh_eth_rcv_snd_disable(ndev);
+
        if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
-                       if (mdp->cd->no_psr || mdp->no_ether_link)
-                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
-               if (mdp->cd->no_psr || mdp->no_ether_link)
-                       sh_eth_rcv_snd_disable(ndev);
        }
 
+       /* Re-enable TX and RX at the end if the E-MAC link change is ignored */
+       if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+               sh_eth_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
        if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
 }
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int sh_eth_get_link_ksettings(struct net_device *ndev,
-                                    struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return 0;
-}
-
-static int sh_eth_set_link_ksettings(struct net_device *ndev,
-                                    const struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-
-       /* disable tx and rx */
-       sh_eth_rcv_snd_disable(ndev);
-
-       ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (ret)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               mdp->duplex = 1;
-       else
-               mdp->duplex = 0;
-
-       if (mdp->cd->set_duplex)
-               mdp->cd->set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* enable tx and rx */
-       sh_eth_rcv_snd_enable(ndev);
-
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
  * version must be bumped as well.  Just adding registers up to that
  * limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
        pm_runtime_put_sync(&mdp->pdev->dev);
 }
 
-static int sh_eth_nway_reset(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       ret = phy_start_aneg(ndev->phydev);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 static u32 sh_eth_get_msglevel(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_regs_len   = sh_eth_get_regs_len,
        .get_regs       = sh_eth_get_regs,
-       .nway_reset     = sh_eth_nway_reset,
+       .nway_reset     = phy_ethtool_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
        .get_link       = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_sset_count     = sh_eth_get_sset_count,
        .get_ringparam  = sh_eth_get_ringparam,
        .set_ringparam  = sh_eth_set_ringparam,
-       .get_link_ksettings = sh_eth_get_link_ksettings,
-       .set_link_ksettings = sh_eth_set_link_ksettings,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_wol        = sh_eth_get_wol,
        .set_wol        = sh_eth_set_wol,
 };
index 23f0785c0573ec72fea3db10dfdf353c41341ee8..7eeac3d6cfe898a9a4ef6df9378d8c6d29383ce1 100644 (file)
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
        return -EPROTONOSUPPORT;
 }
 
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
-                                 struct efx_filter_spec *spec,
-                                 bool replace_equal)
+static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
+                                        struct efx_filter_spec *spec,
+                                        bool replace_equal)
 {
        DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
        bool is_mc_recip;
        s32 rc;
 
-       down_read(&efx->filter_sem);
+       WARN_ON(!rwsem_is_locked(&efx->filter_sem));
        table = efx->filter_state;
        down_write(&table->lock);
 
@@ -4498,10 +4498,22 @@ out_unlock:
        if (rss_locked)
                mutex_unlock(&efx->rss_lock);
        up_write(&table->lock);
-       up_read(&efx->filter_sem);
        return rc;
 }
 
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+                                 struct efx_filter_spec *spec,
+                                 bool replace_equal)
+{
+       s32 ret;
+
+       down_read(&efx->filter_sem);
+       ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
+       up_read(&efx->filter_sem);
+
+       return ret;
+}
+
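
The rename above is the usual _locked split: the worker now asserts (via WARN_ON) that filter_sem is already held instead of taking it, and a thin wrapper takes the semaphore for external callers, while internal paths that already hold it call the worker directly. A userspace sketch of the shape, assuming pthreads (names hypothetical):

#include <pthread.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

/* worker: caller must already hold filter_sem; the kernel version
 * WARN_ONs if it is not held */
static int filter_insert_locked(int spec)
{
        /* ... hash lookup and table update go here ... */
        return spec;
}

/* public entry point: takes the lock, then calls the worker */
static int filter_insert(int spec)
{
        int ret;

        pthread_rwlock_rdlock(&filter_sem);
        ret = filter_insert_locked(spec);
        pthread_rwlock_unlock(&filter_sem);
        return ret;
}

The insert_addr_list/insert_def callers converted below are exactly the "already holding it" case, hence their switch to the _locked variant.
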
 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
        /* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        if (rollback) {
                                netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                eth_broadcast_addr(baddr);
                efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        netif_warn(efx, drv, efx->net_dev,
                                   "Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
        if (vlan->vid != EFX_FILTER_VID_UNSPEC)
                efx_filter_set_eth_local(&spec, vlan->vid, NULL);
 
-       rc = efx_ef10_filter_insert(efx, &spec, true);
+       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
        if (rc < 0) {
                const char *um = multicast ? "Multicast" : "Unicast";
                const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
                                           filter_flags, 0);
                        eth_broadcast_addr(baddr);
                        efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-                       rc = efx_ef10_filter_insert(efx, &spec, true);
+                       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                        if (rc < 0) {
                                netif_warn(efx, drv, efx->net_dev,
                                           "Broadcast filter insert failed rc=%d\n",
index ad4a354ce570e143a741e7ab7155ae84a8a5df34..ce3a177081a854a683493f7f6f2c79ac63f60cc4 100644 (file)
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
        up_write(&efx->filter_sem);
 }
 
-static void efx_restore_filters(struct efx_nic *efx)
-{
-       down_read(&efx->filter_sem);
-       efx->type->filter_table_restore(efx);
-       up_read(&efx->filter_sem);
-}
 
 /**************************************************************************
  *
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx_disable_interrupts(efx);
 
        mutex_lock(&efx->mac_lock);
+       down_write(&efx->filter_sem);
        mutex_lock(&efx->rss_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
            method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        if (efx->type->rx_restore_rss_contexts)
                efx->type->rx_restore_rss_contexts(efx);
        mutex_unlock(&efx->rss_lock);
-       down_read(&efx->filter_sem);
-       efx_restore_filters(efx);
-       up_read(&efx->filter_sem);
+       efx->type->filter_table_restore(efx);
+       up_write(&efx->filter_sem);
        if (efx->type->sriov_reset)
                efx->type->sriov_reset(efx);
 
@@ -2764,6 +2758,7 @@ fail:
        efx->port_initialized = false;
 
        mutex_unlock(&efx->rss_lock);
+       up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
 
        return rc;
@@ -3180,6 +3175,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
        return true;
 }
 
+static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
 {
@@ -3472,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 
        efx_init_napi(efx);
 
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
@@ -3764,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
index 8edf20967c82c583bb59ace5f1f9c30dcfd1530d..e045a5d6b938f43f391a726f301d8911f156b32c 100644 (file)
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;
+       init_rwsem(&state->lock);
 
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
index cb5b0f58c395c2bdbf32e7283d91cf8c4ac5dbe9..edf20361ea5f15c7ddee617f899e31b92d7e261e 100644 (file)
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
        default ARCH_SOCFPGA
-       depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+       depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for ethernet controller on Altera SOCFPGA
index 6e359572b9f0ea53ed46b553fb1cb51273415f57..5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7 100644 (file)
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
        struct reset_control *stmmac_rst;
+       struct reset_control *stmmac_ocp_rst;
        void __iomem *splitter_base;
        bool f2h_ptp_ref_clk;
        struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
                val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
        /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       reset_control_assert(dwmac->stmmac_ocp_rst);
+       reset_control_assert(dwmac->stmmac_rst);
 
        regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
         */
-       if (dwmac->stmmac_rst)
-               reset_control_deassert(dwmac->stmmac_rst);
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+       reset_control_deassert(dwmac->stmmac_rst);
        if (phymode == PHY_INTERFACE_MODE_SGMII) {
                if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
                        dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
                goto err_remove_config_dt;
        }
 
+       dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+       if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+               ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+               dev_err(dev, "error getting reset control of ocp %d\n", ret);
+               goto err_remove_config_dt;
+       }
+
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+
        ret = socfpga_dwmac_parse_data(dwmac, dev);
        if (ret) {
                dev_err(dev, "Unable to parse OF data\n");
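
Dropping the NULL checks around reset_control_assert()/deassert() above is safe because the reset API treats a NULL control as a no-op; devm_reset_control_get_optional() returns NULL rather than an error when the DT simply omits the line. A sketch of that contract with a hypothetical foo_probe():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_probe(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_optional(dev, "foo-reset");
        if (IS_ERR(rst))
                return PTR_ERR(rst);    /* real failure, e.g. -EPROBE_DEFER */

        reset_control_assert(rst);      /* rst may be NULL: both calls are no-ops then */
        reset_control_deassert(rst);
        return 0;
}
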
index 2e6e2a96b4f263023e04eaad77e56f160cbedc5c..f9a61f90cfbc6acb269d4e8320bb9a078ae04239 100644 (file)
@@ -37,7 +37,7 @@
  *             is done in the "stmmac files"
  */
 
-/* struct emac_variant - Descrive dwmac-sun8i hardware variant
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
  * @default_syscon_value:      The default value of the EMAC register in syscon
  *                             This value is used for disabling properly EMAC
  *                             and used as a good starting value in case of the
index d37f17ca62fecf66a6b5af1c9aa105923310a341..65bc3556bd8f8c25b9b37421c80d6a663d8eb0db 100644 (file)
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
        }
 }
 
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
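
dwmac4_set_bfsize() is a plain read-modify-write of the RX channel control register: clear the buffer-size field, then OR in the new size shifted into place and clamped by the mask. The same arithmetic in a standalone userspace form (GENMASK(14, 1) expands to 0x7ffe):

#include <stdint.h>

#define DMA_RBSZ_MASK   0x7ffeu         /* bits 14:1, as GENMASK(14, 1) */
#define DMA_RBSZ_SHIFT  1

/* 'reg' stands in for the value read from the MMIO register */
static uint32_t set_bfsize(uint32_t reg, int bfsize)
{
        reg &= ~DMA_RBSZ_MASK;
        reg |= ((uint32_t)bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
        return reg;
}
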
index c63c1fe3f26b9e4d5cb714ea3ceed56bf103b17e..22a4a6dbb1a4af42d3d7467e3ebca50efef57986 100644 (file)
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR                 BIT(0)
+#define DMA_RBSZ_MASK                  GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT                 1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB            GENMASK(21, 19)
index e44e7b26ce829be0eff000c6a68b064139d532b8..fe8b536b13f864bfff723ea2236a3e5982026533 100644 (file)
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
 };
 
 #define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
 
 struct mac_device_info;
 struct net_device;
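
The new op is reached through stmmac's hwif dispatch macros, which boil down to "call the callback if this core provides it". A simplified sketch of that dispatch (the real stmmac_do_void_callback also reports whether the op existed; names here are illustrative):

struct dma_ops {
        void (*set_bfsize)(void *ioaddr, int bfsize, unsigned int chan);
};

#define do_void_callback(ops, cb, ...)          \
        do {                                    \
                if ((ops)->cb)                  \
                        (ops)->cb(__VA_ARGS__); \
        } while (0)

/* usage: do_void_callback(ops, set_bfsize, ioaddr, 1536, 0); */
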
index e79b0d7b388a16d524917b0dfed1b4dd2f079c2f..ef6a8d39db2f19b261ff9760d523dac664cffe93 100644 (file)
@@ -53,7 +53,7 @@
 #include "dwmac1000.h"
 #include "hwif.h"
 
-#define STMMAC_ALIGN(x)        L1_CACHE_ALIGN(x)
+#define        STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
 
 /* Module parameters */
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
@@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev)
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);
 
+       /*
+        * Half-duplex mode is not supported with multiqueue;
+        * half-duplex only works with a single queue.
+        */
+       if (tx_cnt > 1)
+               phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+                                      SUPPORTED_100baseT_Half |
+                                      SUPPORTED_10baseT_Half);
+
        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
                                rxfifosz, qmode);
+               stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+                               chan);
        }
 
        for (chan = 0; chan < tx_channels_count; chan++) {
index 8d375e51a5265104d515f5622696dc2e0cae1ea2..6a393b16a1fcaf924473e7a56968d69e57f12f7e 100644 (file)
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        /* Enable pci device */
-       ret = pcim_enable_device(pdev);
+       ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
                        __func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
        stmmac_dvr_remove(&pdev->dev);
+       pci_disable_device(pdev);
 }
 
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+static int stmmac_pci_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       ret = stmmac_suspend(dev);
+       if (ret)
+               return ret;
+
+       ret = pci_save_state(pdev);
+       if (ret)
+               return ret;
+
+       pci_disable_device(pdev);
+       pci_wake_from_d3(pdev, true);
+       return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       pci_restore_state(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
+       return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
 
 /* synthetic ID, no official vendor */
 #define PCI_VENDOR_ID_STMMAC 0x700
index 6d141f3931eb650902469cebc18fa3613ad0dcb9..72da77b94ecd987e7e683d0ec890c842090e117e 100644 (file)
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 /**
  * stmmac_axi_setup - parse DT parameters for programming the AXI register
  * @pdev: platform device
- * @priv: driver private struct.
  * Description:
  * if required, from device-tree the AXI internal register can be tuned
  * by using platform parameters.
index 7a16d40a72d13cf1d522e8a3a396c826fe76f9b9..b9221fc1674dfa0ef17a43f8ff86d700a1ae514f 100644 (file)
@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG    (NETIF_MSG_DRV          | \
                         NETIF_MSG_PROBE        | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
        struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
-       __sum16 csum;
 
        if (netif_msg_rx_status(gp))
                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
                        skb = copy_skb;
                }
 
-               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-               skb->csum = csum_unfold(csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       __sum16 csum;
+
+                       csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+                       skb->csum = csum_unfold(csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
+               }
                skb->protocol = eth_type_trans(skb, gp->dev);
 
                napi_gro_receive(&gp->napi, skb);
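
The descriptor's checksum bits are only meaningful to the stack when RX checksumming is enabled, so the conversion is now gated on NETIF_F_RXCSUM. The conversion itself, as a standalone sketch (helper name hypothetical): mask the checksum field out of the status word, complement it, and fix the byte order before it becomes skb->csum for CHECKSUM_COMPLETE.

#include <stdint.h>
#include <arpa/inet.h>

static uint16_t rx_status_to_csum(uint32_t status, uint32_t csum_mask)
{
        /* mask the checksum field, complement it, convert byte order */
        return htons((uint16_t)((status & csum_mask) ^ 0xffffu));
}
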
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
        writel(0, gp->regs + TXDMA_KICK);
 
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
 
        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
 
        /* We can do scatter/gather and HW checksum */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->features = dev->hw_features;
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;
 
index 358edab9e72eeee18b9c17d74e66f2de92d5cc87..3e34cb8ac1d3e66f58196c67595a7baa3624e597 100644 (file)
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                int i;
 
                for (i = 0; i < cpsw->data.slaves; i++) {
-                       if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                       if (vid == cpsw->slaves[i].port_vlan) {
+                               ret = -EINVAL;
+                               goto err;
+                       }
                }
        }
 
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 
                for (i = 0; i < cpsw->data.slaves; i++) {
                        if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                               goto err;
                }
        }
 
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-                                HOST_PORT_NUM, ALE_VLAN, vid);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-                                0, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+                                 HOST_PORT_NUM, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+                                 0, ALE_VLAN, vid);
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
index 93dc05c194d381b51336e95d552180a6997a5084..5766225a4ce117957cdd77fe2a9d4ad4abc8f150 100644 (file)
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 
        idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
-               return -EINVAL;
+               return -ENOENT;
 
        cpsw_ale_read(ale, idx, ale_entry);
 
index cdbddf16dd2931ba66df103c064705d5f0aef350..4f1267477aa4b56b7f3e1d19420302728da56e7d 100644 (file)
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
  * abstract out these details
  */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
index 06d7c9e4dcda92deb027522dc04b34326c9fdc8a..f270beebb4289326baff5e86b33f47eae2eaa49b 100644 (file)
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
                return -EOPNOTSUPP;
 }
 
+static int match_first_device(struct device *dev, void *data)
+{
+       if (dev->parent && dev->parent->of_node)
+               return of_device_is_compatible(dev->parent->of_node,
+                                              "ti,davinci_mdio");
+
+       return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
 
        /* use the first phy on the bus if pdata did not give us a phy id */
        if (!phydev && !priv->phy_id) {
-               phy = bus_find_device_by_name(&mdio_bus_type, NULL,
-                                             "davinci_mdio");
+               /* NOTE: we can't use bus_find_device_by_name() here because
+                * the device name is not guaranteed to be 'davinci_mdio'. On
+                * some systems it can be 'davinci_mdio.0' so we need to use
+                * strncmp() against the first part of the string to correctly
+                * match it.
+                */
+               phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+                                     match_first_device);
                if (phy) {
                        priv->phy_id = dev_name(phy);
                        if (!priv->phy_id || !*priv->phy_id)
index 16c3bfbe19928dfb56b2719490d0b401fae2ee5e..757a3b37ae8a8af8077001d548b65fe862d03c2d 100644 (file)
@@ -218,6 +218,7 @@ issue:
        ret = of_mdiobus_register(bus, np1);
        if (ret) {
                mdiobus_free(bus);
+               lp->mii_bus = NULL;
                return ret;
        }
        return 0;
index 750eaa53bf0ce59429d524ba0658ad6f488a4ba0..ada33c2d9ac20e01af4acec33727623204fda803 100644 (file)
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index f347fd9c5b28370f6452f042bb7f59c0ec8a3cd3..777fa59f5e0cd5abdfb8390ac358d09cf77636a1 100644 (file)
 static const char banner[] __initconst = KERN_INFO \
        "AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
        bpq->ethdev = edev;
        bpq->axdev = ndev;
 
-       memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-       memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+       eth_broadcast_addr(bpq->dest_addr);
+       eth_broadcast_addr(bpq->acpt_addr);
 
        err = register_netdevice(ndev);
        if (err)
index 1a924b867b0742b0aa3e5a15f4da3e6885173e74..4b6e308199d270cd455b7df0de20a8458f6b7941 100644 (file)
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 5d5bd513847fff4ff353e7c58d9967a354d06955..31c3d77b4733f0aa9900138b5c49f398d0642db4 100644 (file)
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
                               VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to set up sub-channels on initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+       struct netvsc_device *nvdev =
+               container_of(w, struct netvsc_device, subchan_work);
+       struct rndis_device *rdev;
+       int i, ret;
+
+       /* Avoid deadlock with device removal already under RTNL */
+       if (!rtnl_trylock()) {
+               schedule_work(w);
+               return;
+       }
+
+       rdev = nvdev->extension;
+       if (rdev) {
+               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               if (ret == 0) {
+                       netif_device_attach(rdev->ndev);
+               } else {
+                       /* fallback to only primary channel */
+                       for (i = 1; i < nvdev->num_chn; i++)
+                               netif_napi_del(&nvdev->chan_table[i].napi);
+
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       rtnl_unlock();
+}
+
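
netvsc_subchan_work() uses the standard trylock-or-requeue idiom: device removal can already hold RTNL while flushing this very work item, so blocking on rtnl_lock() here could deadlock; a busy lock instead puts the work back on the queue for a later retry. A userspace sketch of the idiom, assuming pthreads (names hypothetical):

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void requeue(void (*fn)(void))
{
        (void)fn;       /* stand-in for schedule_work(): run fn again later */
}

static void subchan_work(void)
{
        if (pthread_mutex_trylock(&big_lock) != 0) {
                requeue(subchan_work);  /* lock busy: retry, don't block */
                return;
        }
        /* ... set up sub-channels, attach the netdev ... */
        pthread_mutex_unlock(&big_lock);
}
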
 static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
-       INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+       INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
        return net_device;
 }
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int work_done = 0;
+       int ret;
 
        /* If starting a new interval */
        if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }
 
-       /* If send of pending receive completions suceeded
-        *   and did not exhaust NAPI budget this time
-        *   and not doing busy poll
+       /* Send any pending receive completions */
+       ret = send_recv_completions(ndev, net_device, nvchan);
+
+       /* If it did not exhaust NAPI budget this time
+        *  and not doing busy poll
         * then re-enable host interrupts
-        *     and reschedule if ring is not empty.
+        *  and reschedule if ring is not empty
+        *   or sending receive completion failed.
         */
-       if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
-           work_done < budget &&
+       if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
-           hv_end_read(&channel->inbound) &&
+           (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
                __napi_schedule(napi);
index fe2256bf1d137fea6b76c5e3a564b191e2b5da7c..dd1d6e115145d4c14fb25d1883d1e42614e211a9 100644 (file)
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);
 
-       /* Note: enable and attach happen when sub-channels setup */
+       if (nvdev->num_chn > 1) {
+               ret = rndis_set_subchannel(ndev, nvdev);
+
+               /* if unavailable, just proceed with one queue */
+               if (ret) {
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       /* In either case the device is now ready */
+       netif_device_attach(ndev);
 
+       /* Note: enable and attach happen when sub-channels are set up */
        netif_carrier_off(ndev);
 
        if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       if (nvdev->num_chn > 1)
+               schedule_work(&nvdev->subchan_work);
+
        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
index 5428bb26110262fdfb66daaac8463c91e7981d42..408ece27131c4611a8600028831f10aa8b47ed60 100644 (file)
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-       struct netvsc_device *nvdev
-               = container_of(w, struct netvsc_device, subchan_work);
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-       struct net_device_context *ndev_ctx;
-       struct rndis_device *rdev;
-       struct net_device *ndev;
-       struct hv_device *hv_dev;
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hv_dev = ndev_ctx->device_ctx;
+       struct rndis_device *rdev = nvdev->extension;
        int i, ret;
 
-       if (!rtnl_trylock()) {
-               schedule_work(w);
-               return;
-       }
-
-       rdev = nvdev->extension;
-       if (!rdev)
-               goto unlock;    /* device was removed */
-
-       ndev = rdev->ndev;
-       ndev_ctx = netdev_priv(ndev);
-       hv_dev = ndev_ctx->device_ctx;
+       ASSERT_RTNL();
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret) {
                netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        wait_for_completion(&nvdev->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "sub channel request failed\n");
-               goto failed;
+               return -EIO;
        }
 
        nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-       netif_device_attach(ndev);
-       rtnl_unlock();
-       return;
-
-failed:
-       /* fallback to only primary channel */
-       for (i = 1; i < nvdev->num_chn; i++)
-               netif_napi_del(&nvdev->chan_table[i].napi);
-
-       nvdev->max_chn = 1;
-       nvdev->num_chn = 1;
-
-       netif_device_attach(ndev);
-unlock:
-       rtnl_unlock();
+       return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                netif_napi_add(net, &net_device->chan_table[i].napi,
                               netvsc_poll, NAPI_POLL_WEIGHT);
 
-       if (net_device->num_chn > 1)
-               schedule_work(&net_device->subchan_work);
+       return net_device;
 
 out:
-       /* if unavailable, just proceed with one queue */
-       if (ret) {
-               net_device->max_chn = 1;
-               net_device->num_chn = 1;
-       }
-
-       /* No sub channels, device is ready */
-       if (net_device->num_chn == 1)
-               netif_device_attach(net);
-
-       return net_device;
+       /* setting up multiple channels failed */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
+       return 0;
 
 err_dev_remv:
        rndis_filter_device_remove(dev, net_device);
index 64f1b1e77bc0f361dc59538072f59ca7a7c72690..23a52b9293f35eaec1d71063305a029ba466d819 100644 (file)
@@ -275,6 +275,8 @@ struct adf7242_local {
        struct spi_message stat_msg;
        struct spi_transfer stat_xfer;
        struct dentry *debugfs_root;
+       struct delayed_work work;
+       struct workqueue_struct *wqueue;
        unsigned long flags;
        int tx_stat;
        bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
        /* Wait until the ACK is sent */
        adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
        adf7242_clear_irqstat(lp);
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
 
        return adf7242_cmd(lp, CMD_RC_RX);
 }
 
+static void adf7242_rx_cal_work(struct work_struct *work)
+{
+       struct adf7242_local *lp =
+               container_of(work, struct adf7242_local, work.work);
+
+       /* Reissuing RC_RX every 400ms - to adjust for offset
+        * drift in receiver (datasheet page 61, OCL section)
+        */
+
+       if (!test_bit(FLAG_XMIT, &lp->flags)) {
+               adf7242_cmd(lp, CMD_RC_PHY_RDY);
+               adf7242_cmd_rx(lp);
+       }
+}
+
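
The calibration work above relies on mod_delayed_work() semantics: each call re-arms the pending item to fire 400 ms from now rather than queuing a duplicate, so the RX-arming path and the IRQ path (below) keep pushing the timer out while traffic flows, and the recalibration only runs in idle gaps. A kernel-style sketch of that wiring for a hypothetical foo driver:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_priv {
        struct workqueue_struct *wq;    /* ordered, WQ_MEM_RECLAIM */
        struct delayed_work cal_work;
};

/* any RX (re)arm or interrupt pushes the calibration 400 ms out */
static void foo_touch_cal(struct foo_priv *p)
{
        mod_delayed_work(p->wq, &p->cal_work, msecs_to_jiffies(400));
}
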
 static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
 {
        struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
        enable_irq(lp->spi->irq);
        set_bit(FLAG_START, &lp->flags);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       return adf7242_cmd_rx(lp);
 }
 
 static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
        struct adf7242_local *lp = hw->priv;
 
        disable_irq(lp->spi->irq);
+       cancel_delayed_work_sync(&lp->work);
        adf7242_cmd(lp, CMD_RC_IDLE);
        clear_bit(FLAG_START, &lp->flags);
        adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
        adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
        adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       if (test_bit(FLAG_START, &lp->flags))
+               return adf7242_cmd_rx(lp);
+       else
+               return adf7242_cmd(lp, CMD_RC_PHY_RDY);
 }
 
 static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
        /* ensure existing instances of the IRQ handler have completed */
        disable_irq(lp->spi->irq);
        set_bit(FLAG_XMIT, &lp->flags);
+       cancel_delayed_work_sync(&lp->work);
        reinit_completion(&lp->tx_complete);
        adf7242_cmd(lp, CMD_RC_PHY_RDY);
        adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
        unsigned int xmit;
        u8 irq1;
 
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
        adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
 
        if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
        spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
 
        spi_set_drvdata(spi, lp);
+       INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+       lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+                                            WQ_MEM_RECLAIM);
 
        ret = adf7242_hw_init(lp);
        if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
        if (!IS_ERR_OR_NULL(lp->debugfs_root))
                debugfs_remove_recursive(lp->debugfs_root);
 
+       cancel_delayed_work_sync(&lp->work);
+       destroy_workqueue(lp->wqueue);
+
        ieee802154_unregister_hw(lp->hw);
        mutex_destroy(&lp->bmux);
        ieee802154_free_hw(lp->hw);
index 77abedf0b52447b4f1d0b5bdd99c259cb3555c1a..3d9e91579866826e476ceb2374b0d286e70c07fd 100644 (file)
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 static int
 at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
        return 0;
 }
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
                u16 addr = le16_to_cpu(filt->short_addr);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for saddr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
                __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
                __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
        }
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_PANID_CHANGED) {
                u16 pan = le16_to_cpu(filt->pan_id);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for pan id\n");
+               dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
                __at86rf230_write(lp, RG_PAN_ID_0, pan);
                __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
        }
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
                u8 i, addr[8];
 
                memcpy(addr, &filt->ieee_addr, 8);
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
                for (i = 0; i < 8; i++)
                        __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
        }
 
        if (changed & IEEE802154_AFILT_PANC_CHANGED) {
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for panc change\n");
+               dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
                if (filt->pan_coord)
                        at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
                else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
        return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-
 static int
 at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
index 0d673f7682ee065223b64462bc2c4df0a03826a0..176395e4b7bb0ca628bdd22b4f13a23e425bfae2 100644 (file)
@@ -49,7 +49,7 @@ struct fakelb_phy {
 
 static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
 
        return 0;
index de0d7f28a181ca4acb1da2131d82a981627a8e96..e428277781ac4422bec2e8f47fd35476a85a74f7 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/spi/spi.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 #include <linux/regmap.h>
index 4377c26f714d0522ebf5d1de6ac774b6e42024ea..4a949569ec4c51668fe7b795caef7ece5d61854b 100644 (file)
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
        struct ipvl_dev *ipvlan;
        struct net_device *mdev = port->dev;
-       int err = 0;
+       unsigned int flags;
+       int err;
 
        ASSERT_RTNL();
        if (port->mode != nval) {
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+                       flags = ipvlan->dev->flags;
+                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags | IFF_NOARP);
+                       } else {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags & ~IFF_NOARP);
+                       }
+                       if (unlikely(err))
+                               goto fail;
+               }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
                                mdev->priv_flags |= IFF_L3MDEV_MASTER;
                        } else
-                               return err;
+                               goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-                               ipvlan->dev->flags |= IFF_NOARP;
-                       else
-                               ipvlan->dev->flags &= ~IFF_NOARP;
-               }
                port->mode = nval;
        }
+       return 0;
+
+fail:
+       /* Undo the flags changes that have been done so far. */
+       list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+               flags = ipvlan->dev->flags;
+               if (port->mode == IPVLAN_MODE_L3 ||
+                   port->mode == IPVLAN_MODE_L3S)
+                       dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+               else
+                       dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+       }
+
        return err;
 }
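
The fail path above is the canonical rollback idiom for partial list updates: list_for_each_entry_continue_reverse() resumes from the entry before the one that failed and walks backwards, so exactly the devices already flipped get restored. A kernel-style sketch, with apply_one()/undo_one() as hypothetical stand-ins for the dev_change_flags() calls:

#include <linux/list.h>

struct foo {
        struct list_head node;
};

static int apply_one(struct foo *f);
static void undo_one(struct foo *f);

static int apply_all(struct list_head *head)
{
        struct foo *f;
        int err = 0;

        list_for_each_entry(f, head, node) {
                err = apply_one(f);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        /* revisit, in reverse, only the entries already changed */
        list_for_each_entry_continue_reverse(f, head, node)
                undo_one(f);
        return err;
}
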
 
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
-       ipvlan_adjust_mtu(ipvlan, phy_dev);
+       if (!tb[IFLA_MTU])
+               ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);
 
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
index 83f7420ddea569126db0cc25719940892d760075..4f390fa557e4ba0c897b20faefaa85b03f4ec70a 100644 (file)
@@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
 
        netif_addr_lock_bh(failover_dev);
        dev_uc_sync_multiple(slave_dev, failover_dev);
-       dev_uc_sync_multiple(slave_dev, failover_dev);
+       dev_mc_sync_multiple(slave_dev, failover_dev);
        netif_addr_unlock_bh(failover_dev);
 
        err = vlan_vids_add_by_dev(slave_dev, failover_dev);
index ba663e5af168bc4db3152f6a8fdaf85bf4753148..5135fc371f01ba27365b85c278b3c20b90e65c58 100644 (file)
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
                struct net *net = nsim_to_net(ns);
                bool *reg_devlink = net_generic(net, nsim_devlink_id);
 
+               devlink_resources_unregister(ns->devlink, NULL);
                devlink_unregister(ns->devlink);
                devlink_free(ns->devlink);
                ns->devlink = NULL;
index 081d99aa39853097e7d486e813f344fb895598aa..49ac678eb2dc7ca6539794b9ace40ba86aaa8d6a 100644 (file)
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+               err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
        }
 
        return err;
index b8f57e9b937901fd142413c4002f39205546c35a..1cd439bdf6087af2913f589b499cd5c5abe5a3bb 100644 (file)
 #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS             BIT(12)
 #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE    BIT(14)
 
-#define MII_88E1121_PHY_LED_CTRL       16
+#define MII_PHY_LED_CTRL               16
 #define MII_88E1121_PHY_LED_DEF                0x0030
+#define MII_88E1510_PHY_LED_DEF                0x1177
 
 #define MII_M1011_PHY_STATUS           0x11
 #define MII_M1011_PHY_STATUS_1000      0x8000
@@ -632,8 +633,40 @@ error:
        return err;
 }
 
+static void marvell_config_led(struct phy_device *phydev)
+{
+       u16 def_config;
+       int err;
+
+       switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
+       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
+               def_config = MII_88E1121_PHY_LED_DEF;
+               break;
+       /* Default PHY LED config:
+        * LED[0] .. 1000Mbps Link
+        * LED[1] .. 100Mbps Link
+        * LED[2] .. Blink, Activity
+        */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+               def_config = MII_88E1510_PHY_LED_DEF;
+               break;
+       default:
+               return;
+       }
+
+       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
+                             def_config);
+       if (err < 0)
+               pr_warn("Fail to config marvell phy LED.\n");
+}
+
 static int marvell_config_init(struct phy_device *phydev)
 {
+       /* Set default LED */
+       marvell_config_led(phydev);
+
        /* Set registers from marvell,reg-init DT property */
        return marvell_of_reg_init(phydev);
 }
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
        return genphy_soft_reset(phydev);
 }
 
-static int m88e1121_config_init(struct phy_device *phydev)
-{
-       int err;
-
-       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
-                             MII_88E1121_PHY_LED_CTRL,
-                             MII_88E1121_PHY_LED_DEF);
-       if (err < 0)
-               return err;
-
-       /* Set marvell,reg-init configuration from device tree */
-       return marvell_config_init(phydev);
-}
-
 static int m88e1318_config_init(struct phy_device *phydev)
 {
        if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
                        return err;
        }
 
-       return m88e1121_config_init(phydev);
+       return marvell_config_init(phydev);
 }
 
 static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1121_probe,
-               .config_init = &m88e1121_config_init,
+               .config_init = &marvell_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
index 0831b7142df7a334b889296688cc3218d8efa2a1..0c5b68e7da51aa8d0c7c73e7c6b3cc632b1e3510 100644 (file)
@@ -218,7 +218,7 @@ out:
 
 static int mdio_mux_iproc_remove(struct platform_device *pdev)
 {
-       struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+       struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
 
        mdio_mux_uninit(md->mux_handle);
        mdiobus_unregister(md->mii_bus);
index 537297d2b4b4309adeacd1e66541b2aa1830b8b8..6c9b24fe31488b03499a9866f13a065449b29cce 100644 (file)
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
         * negotiation may already be done and aneg interrupt may not be
         * generated.
         */
-       if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+       if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
                err = phy_aneg_done(phydev);
                if (err > 0) {
                        trigger = true;
index bd0f339f69fd064737f8f3c80e6645e73c56a2b9..b9f5f40a7ac1e6640a653e8207cdd8885100e09f 100644 (file)
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-       /* The default values for phydev->supported are provided by the PHY
-        * driver "features" member, we want to reset to sane defaults first
-        * before supporting higher speeds.
-        */
-       phydev->supported &= PHY_DEFAULT_FEATURES;
+       phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+                              PHY_10BT_FEATURES);
 
        switch (max_speed) {
        default:
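The replacement above strips only the 10/100/1000BASE-T feature bits before re-enabling them up to max_speed, instead of resetting to PHY_DEFAULT_FEATURES, which also clobbered unrelated flags the driver had advertised. A self-contained sketch of the mask-then-restore logic, with made-up feature bit values:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical feature bits; the kernel derives these from ethtool modes. */
	#define F_10BT   (1u << 0)
	#define F_100BT  (1u << 1)
	#define F_1000BT (1u << 2)
	#define F_PAUSE  (1u << 3)   /* must survive a max-speed change */

	static uint32_t limit_speed(uint32_t supported, unsigned int max_speed)
	{
		/* Strip only the speed bits; leave pause/aneg/etc. intact. */
		supported &= ~(F_10BT | F_100BT | F_1000BT);

		switch (max_speed) {
		default:
			return 0;            /* the kernel version returns -EINVAL */
		case 1000:
			supported |= F_1000BT;
			/* fall through */
		case 100:
			supported |= F_100BT;
			/* fall through */
		case 10:
			supported |= F_10BT;
		}
		return supported;
	}

	int main(void)
	{
		uint32_t s = limit_speed(F_10BT | F_100BT | F_1000BT | F_PAUSE, 100);
		printf("after limit to 100M: 0x%x (pause kept: %s)\n",
		       s, (s & F_PAUSE) ? "yes" : "no");
		return 0;
	}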
index d437f4f5ed5291d21236a71ef3e36089344f9201..740655261e5b7347116d2a5b53445c8d023cb49c 100644 (file)
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
        }
        if (bus->started)
                bus->socket_ops->start(bus->sfp);
-       bus->netdev->sfp_bus = bus;
        bus->registered = true;
        return 0;
 }
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
                if (bus->phydev && ops && ops->disconnect_phy)
                        ops->disconnect_phy(bus->upstream);
        }
-       bus->netdev->sfp_bus = NULL;
        bus->registered = false;
 }
 
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_stop);
 
+static void sfp_upstream_clear(struct sfp_bus *bus)
+{
+       bus->upstream_ops = NULL;
+       bus->upstream = NULL;
+       bus->netdev->sfp_bus = NULL;
+       bus->netdev = NULL;
+}
+
 /**
  * sfp_register_upstream() - Register the neighbouring device
  * @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
                bus->upstream_ops = ops;
                bus->upstream = upstream;
                bus->netdev = ndev;
+               ndev->sfp_bus = bus;
 
-               if (bus->sfp)
+               if (bus->sfp) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_upstream_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->sfp)
                sfp_unregister_bus(bus);
-       bus->upstream = NULL;
-       bus->netdev = NULL;
+       sfp_upstream_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_module_remove);
 
+static void sfp_socket_clear(struct sfp_bus *bus)
+{
+       bus->sfp_dev = NULL;
+       bus->sfp = NULL;
+       bus->socket_ops = NULL;
+}
+
 struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                                    const struct sfp_socket_ops *ops)
 {
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                bus->sfp = sfp;
                bus->socket_ops = ops;
 
-               if (bus->netdev)
+               if (bus->netdev) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_socket_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->netdev)
                sfp_unregister_bus(bus);
-       bus->sfp_dev = NULL;
-       bus->sfp = NULL;
-       bus->socket_ops = NULL;
+       sfp_socket_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
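Both sfp-bus registration paths now undo their own pointer setup when sfp_register_bus() fails, via clear helpers that mirror the teardown done on unregister. A small sketch of the pattern, using hypothetical types: pair every "wire up" step with a clear helper and call it on the failure path so the bus is never left half-initialised.

	#include <stddef.h>
	#include <stdio.h>

	struct bus { void *upstream; void *netdev; int registered; };

	static int register_bus(struct bus *b)
	{
		/* Pretend registration can fail, e.g. a missing attach op. */
		if (!b->upstream || !b->netdev)
			return -1;
		b->registered = 1;
		return 0;
	}

	/* Mirror image of the setup done in attach_upstream() below. */
	static void upstream_clear(struct bus *b)
	{
		b->upstream = NULL;
		b->netdev = NULL;
	}

	static int attach_upstream(struct bus *b, void *upstream, void *netdev)
	{
		b->upstream = upstream;
		b->netdev = netdev;

		if (register_bus(b)) {
			upstream_clear(b);  /* roll back our own setup */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct bus b = { 0 };
		int dummy;

		printf("attach: %d, registered: %d\n",
		       attach_upstream(&b, &dummy, NULL), b.registered);
		return 0;
	}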
index de51e8f70f44ea6663b330d2ae41024e99865490..ce61231e96ea5fe27f512fbd0d80d4609997e508 100644 (file)
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppoe_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
index a192a017cc68878360505b93df151de3d0b9b730..f5727baac84a5d10fd70837a75fcfa8194992f9a 100644 (file)
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                case XDP_TX:
                        get_page(alloc_frag->page);
                        alloc_frag->offset += buflen;
-                       if (tun_xdp_tx(tun->dev, &xdp))
+                       if (tun_xdp_tx(tun->dev, &xdp) < 0)
                                goto err_redirect;
                        rcu_read_unlock();
                        local_bh_enable();
index 3d4f7959dabb9c39e17754df4f72013c89743d5a..b1b3d8f7e67dd052eae618e33698c633751df60a 100644 (file)
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
                                     priv->presvd_phy_advertise);
 
                /* Restore BMCR */
+               if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+                       priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
                asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
                                     priv->presvd_phy_bmcr);
 
-               mii_nway_restart(&dev->mii);
                priv->presvd_phy_advertise = 0;
                priv->presvd_phy_bmcr = 0;
        }
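The ax88772 restore path above works because BMCR_ANRESTART only has an effect when BMCR_ANENABLE is also set, so the restart bit can be folded into the saved BMCR value and written in one go, instead of calling mii_nway_restart() afterwards for a second, redundant MDIO write. A sketch of the bit fold, using the clause-22 BMCR values from linux/mii.h:

	#include <stdint.h>
	#include <stdio.h>

	#define BMCR_ANENABLE  0x1000   /* auto-negotiation enable */
	#define BMCR_ANRESTART 0x0200   /* self-clearing restart bit */

	/* One write restores the mode and, if aneg was on, restarts negotiation. */
	static uint16_t restore_bmcr(uint16_t saved)
	{
		if (saved & BMCR_ANENABLE)
			saved |= BMCR_ANRESTART;
		return saved;
	}

	int main(void)
	{
		printf("restored BMCR: 0x%04x\n", restore_bmcr(BMCR_ANENABLE));
		return 0;
	}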
index b0e8b9613054137215e2f502f9deeab3bbad8f80..1eaec648bd1f716db3d06622cdfb7834e64e4e38 100644 (file)
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
        atomic_set(&ctx->stop, 1);
 
-       if (hrtimer_active(&ctx->tx_timer))
-               hrtimer_cancel(&ctx->tx_timer);
+       hrtimer_cancel(&ctx->tx_timer);
 
        tasklet_kill(&ctx->bh);
 
index 8dff87ec6d99c5dca122dcdb5d3697157564cfa2..aeca484a75b89742458e4b76dca220c54f8c208d 100644 (file)
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define DEFAULT_VLAN_RX_OFFLOAD                (true)
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -1241,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        mod_timer(&dev->stat_monitor,
                                  jiffies + STAT_UPDATE_TIMER);
                }
+
+               tasklet_schedule(&dev->bh);
        }
 
        return ret;
@@ -2298,7 +2301,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;
 
-       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
        netdev->mtu = new_mtu;
 
@@ -2364,6 +2367,11 @@ static int lan78xx_set_features(struct net_device *netdev,
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2595,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_TX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev,
+                                             dev->net->mtu + VLAN_ETH_HLEN);
 
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
@@ -2975,6 +2984,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        if (DEFAULT_TSO_CSUM_ENABLE)
                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+       if (DEFAULT_VLAN_RX_OFFLOAD)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       if (DEFAULT_VLAN_FILTER_ENABLE)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        dev->net->hw_features = dev->net->features;
 
        ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3054,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
                                    struct sk_buff *skb,
                                    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+       /* HW Checksum offload appears to be flawed if used when not stripping
+        * VLAN headers. Drop back to S/W checksums under these conditions.
+        */
        if (!(dev->net->features & NETIF_F_RXCSUM) ||
-           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+           ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+            !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
                skb->ip_summed = CHECKSUM_NONE;
        } else {
                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3068,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
        }
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (rx_cmd_a & RX_CMD_A_FVTG_))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
        int             status;
@@ -3112,6 +3142,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        if (skb->len == size) {
                                lan78xx_rx_csum_offload(dev, skb,
                                                        rx_cmd_a, rx_cmd_b);
+                               lan78xx_rx_vlan_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
 
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3162,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        skb_set_tail_pointer(skb2, size);
 
                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+                       lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
                        skb2->truesize = size + sizeof(struct sk_buff);
@@ -3313,6 +3346,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        pkt_cnt = 0;
        count = 0;
        length = 0;
+       spin_lock_irqsave(&tqp->lock, flags);
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
@@ -3321,7 +3355,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                        }
                        count = 1;
                        length = skb->len - TX_OVERHEAD;
-                       skb2 = skb_dequeue(tqp);
+                       __skb_unlink(skb, tqp);
+                       spin_unlock_irqrestore(&tqp->lock, flags);
                        goto gso_skb;
                }
 
@@ -3330,6 +3365,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
                pkt_cnt++;
        }
+       spin_unlock_irqrestore(&tqp->lock, flags);
 
        /* copy to a single skb */
        skb = alloc_skb(skb_totallen, GFP_ATOMIC);
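With RFE_CTL_VLAN_STRIP_ enabled the lan78xx hardware reports the stripped tag in the RX command word, and lan78xx_rx_vlan_offload() hands the low 16 bits of rx_cmd_b to the stack as the VLAN TCI. A sketch of that extraction; the exact bit position of the assumed RX_CMD_A_FVTG flag is an illustration, not the datasheet value:

	#include <stdint.h>
	#include <stdio.h>

	#define RX_CMD_A_FVTG (1u << 19)   /* assumed "frame had a VLAN tag" bit */

	/* Return 1 and fill *tci if the hardware stripped a tag, else 0. */
	static int rx_vlan_tci(uint32_t rx_cmd_a, uint32_t rx_cmd_b, uint16_t *tci)
	{
		if (!(rx_cmd_a & RX_CMD_A_FVTG))
			return 0;
		*tci = (uint16_t)(rx_cmd_b & 0xffff);  /* PCP | DEI | VLAN ID */
		return 1;
	}

	int main(void)
	{
		uint16_t tci;

		if (rx_vlan_tci(RX_CMD_A_FVTG, 0x2064, &tci))
			printf("vlan id %u, prio %u\n", tci & 0x0fff, tci >> 13);
		return 0;
	}

This is also why the MTU paths switch from ETH_HLEN to VLAN_ETH_HLEN: the RX frame-length limit must leave room for the tag before it is stripped.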
index 8e8b51f171f4fa227340e80009ce5c2c059db053..cb0cc30c3d6a190e8d3132b6bab4c5d67e29979c 100644 (file)
@@ -1246,12 +1246,14 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
index 86f7196f9d91fbf55c791fff88687a43518d66d8..2a58607a6aea809b14e0aa03955cfa099118e607 100644 (file)
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       napi_disable(&tp->napi);
+       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+               napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
index 5f565bd574da3bc7ce741e3b280a9ff5dece4352..48ba80a8ca5ce8e566931979edcff4bcfe47bc2e 100644 (file)
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
                   (netdev->flags & IFF_ALLMULTI)) {
                rx_creg &= 0xfffe;
                rx_creg |= 0x0002;
-               dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+               dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
        } else {
                /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
                rx_creg &= 0x00fc;
index 7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb..05553d2524469f97e4a02bb48f43f6820ad2b3e5 100644 (file)
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
 static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
                                            u32 *data, int in_pm)
 {
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
                return -EIO;
        }
 
+       /* phy workaround for gig link */
+       smsc75xx_phy_gig_workaround(dev);
+
        smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
                ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
                ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
        return -EIO;
 }
 
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       int ret = 0, timeout = 0;
+       u32 buf, link_up = 0;
+
+       /* Set the phy in Gig loopback */
+       smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+       /* Wait for the link up */
+       do {
+               link_up = smsc75xx_link_ok_nopm(dev);
+               usleep_range(10000, 20000);
+               timeout++;
+       } while ((!link_up) && (timeout < 1000));
+
+       if (timeout >= 1000) {
+               netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+               return -EIO;
+       }
+
+       /* phy reset */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       buf |= PMT_CTL_PHY_RST;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       timeout = 0;
+       do {
+               usleep_range(10000, 20000);
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+               if (ret < 0) {
+                       netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+                                   ret);
+                       return ret;
+               }
+               timeout++;
+       } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+       if (timeout >= 100) {
+               netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
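smsc75xx_phy_gig_workaround() above polls twice with a bounded iteration count: once for link-up in gigabit loopback, then for PMT_CTL_PHY_RST to self-clear. A generic sketch of that bounded-poll idiom, with a stub condition standing in for the register read:

	#include <stdio.h>
	#include <unistd.h>

	/* Poll cond(arg) every interval_us, at most max_tries times.
	 * Returns 0 on success, -1 on timeout (the driver returns -EIO). */
	static int poll_timeout(int (*cond)(void *), void *arg,
				unsigned int interval_us, unsigned int max_tries)
	{
		unsigned int tries;

		for (tries = 0; tries < max_tries; tries++) {
			if (cond(arg))
				return 0;
			usleep(interval_us);
		}
		return -1;
	}

	static int fake_link_up(void *arg)
	{
		int *countdown = arg;
		return --(*countdown) <= 0;   /* "link" comes up after a few polls */
	}

	int main(void)
	{
		int countdown = 3;

		printf("poll result: %d\n",
		       poll_timeout(fake_link_up, &countdown, 10000, 1000));
		return 0;
	}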
index b6c9a2af37328d1037c3b0ba761256092556167e..2b6ec927809e95b5036ff46c2bd5565a9d112eee 100644 (file)
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX          BIT(0)
+#define VIRTIO_XDP_REDIR       BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    bool *xdp_xmit)
+                                    unsigned int *xdp_xmit,
+                                    unsigned int *rbytes)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -597,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
        int err;
 
        len -= vi->hdr_len;
+       *rbytes += len;
 
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -654,14 +660,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -701,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
-                                  unsigned int len)
+                                  unsigned int len,
+                                  unsigned int *rbytes)
 {
        struct page *page = buf;
        struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
+       *rbytes += len - vi->hdr_len;
        if (unlikely(!skb))
                goto err;
 
@@ -723,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        bool *xdp_xmit)
+                                        unsigned int *xdp_xmit,
+                                        unsigned int *rbytes)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -736,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        int err;
 
        head_skb = NULL;
+       *rbytes += len - vi->hdr_len;
 
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -818,7 +828,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -830,7 +840,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -873,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        goto err_buf;
                }
 
+               *rbytes += len;
                page = virt_to_head_page(buf);
 
                truesize = mergeable_ctx_to_truesize(ctx);
@@ -928,6 +939,7 @@ err_skb:
                        dev->stats.rx_length_errors++;
                        break;
                }
+               *rbytes += len;
                page = virt_to_head_page(buf);
                put_page(page);
        }
@@ -938,13 +950,13 @@ xdp_xmit:
        return NULL;
 }
 
-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                       void *buf, unsigned int len, void **ctx,
+                       unsigned int *xdp_xmit, unsigned int *rbytes)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
-       int ret;
 
        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
@@ -956,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                } else {
                        put_page(virt_to_head_page(buf));
                }
-               return 0;
+               return;
        }
 
        if (vi->mergeable_rx_bufs)
-               skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+               skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+                                       rbytes);
        else if (vi->big_packets)
-               skb = receive_big(dev, vi, rq, buf, len);
+               skb = receive_big(dev, vi, rq, buf, len, rbytes);
        else
-               skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+               skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
 
        if (unlikely(!skb))
-               return 0;
+               return;
 
        hdr = skb_vnet_hdr(skb);
 
-       ret = skb->len;
-
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
@@ -989,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
        napi_gro_receive(&rq->napi, skb);
-       return ret;
+       return;
 
 frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
-       return 0;
 }
 
 /* Unlike mergeable buffers, all buffers are allocated to the
@@ -1232,7 +1242,8 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                          unsigned int *xdp_xmit)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
@@ -1243,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
 
                while (received < budget &&
                       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-                       bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
+                       receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
                        received++;
                }
        } else {
                while (received < budget &&
                       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                       bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
+                       receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
                        received++;
                }
        }
@@ -1321,7 +1332,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
 
        virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1342,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit) {
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & VIRTIO_XDP_TX) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
-               xdp_do_flush_map();
        }
 
        return received;
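Replacing virtio_net's single xdp_xmit bool with a bit mask lets the NAPI poll loop distinguish the two completion actions: xdp_do_flush_map() is only needed after an XDP_REDIRECT verdict, and a queue kick only after XDP_TX. A compact sketch of accumulating per-packet verdicts into flags and acting once per batch:

	#include <stdio.h>

	#define XDP_FLAG_TX    (1u << 0)
	#define XDP_FLAG_REDIR (1u << 1)

	/* Per-packet receive handler: OR its verdict into *xdp_xmit. */
	static void receive_one(int verdict, unsigned int *xdp_xmit)
	{
		if (verdict == 0)
			*xdp_xmit |= XDP_FLAG_TX;
		else if (verdict == 1)
			*xdp_xmit |= XDP_FLAG_REDIR;
	}

	int main(void)
	{
		unsigned int xdp_xmit = 0;
		int verdicts[] = { 0, 1, 0 };   /* a batch of TX/REDIRECT verdicts */
		unsigned int i;

		for (i = 0; i < 3; i++)
			receive_one(verdicts[i], &xdp_xmit);

		/* One flush and one kick per batch, not per packet. */
		if (xdp_xmit & XDP_FLAG_REDIR)
			printf("xdp_do_flush_map()\n");
		if (xdp_xmit & XDP_FLAG_TX)
			printf("virtqueue_kick()\n");
		return 0;
	}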
index aee0e60471f10d59c39ad39f8170eedea722455d..e857cb3335f6bd4e54b01050d11a4aa4b12b087b 100644 (file)
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
        flush = 0;
 
 out:
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
@@ -638,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+                                        const u8 *mac, __u16 state,
+                                        __be32 src_vni, __u8 ndm_flags)
+{
+       struct vxlan_fdb *f;
+
+       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       f->state = state;
+       f->flags = ndm_flags;
+       f->updated = f->used = jiffies;
+       f->vni = src_vni;
+       INIT_LIST_HEAD(&f->remotes);
+       memcpy(f->eth_addr, mac, ETH_ALEN);
+
+       return f;
+}
+
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, union vxlan_addr *ip,
+                           __u16 state, __be16 port, __be32 src_vni,
+                           __be32 vni, __u32 ifindex, __u8 ndm_flags,
+                           struct vxlan_fdb **fdb)
+{
+       struct vxlan_rdst *rd = NULL;
+       struct vxlan_fdb *f;
+       int rc;
+
+       if (vxlan->cfg.addrmax &&
+           vxlan->addrcnt >= vxlan->cfg.addrmax)
+               return -ENOSPC;
+
+       netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+       f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+       if (!f)
+               return -ENOMEM;
+
+       rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+       if (rc < 0) {
+               kfree(f);
+               return rc;
+       }
+
+       ++vxlan->addrcnt;
+       hlist_add_head_rcu(&f->hlist,
+                          vxlan_fdb_head(vxlan, mac, src_vni));
+
+       *fdb = f;
+
+       return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
                            __be16 port, __be32 src_vni, __be32 vni,
@@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->cfg.addrmax &&
-                   vxlan->addrcnt >= vxlan->cfg.addrmax)
-                       return -ENOSPC;
-
                /* Disallow replace to add a multicast entry */
                if ((flags & NLM_F_REPLACE) &&
                    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
                        return -EOPNOTSUPP;
 
                netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
-               f = kmalloc(sizeof(*f), GFP_ATOMIC);
-               if (!f)
-                       return -ENOMEM;
-
-               notify = 1;
-               f->state = state;
-               f->flags = ndm_flags;
-               f->updated = f->used = jiffies;
-               f->vni = src_vni;
-               INIT_LIST_HEAD(&f->remotes);
-               memcpy(f->eth_addr, mac, ETH_ALEN);
-
-               rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-               if (rc < 0) {
-                       kfree(f);
+               rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+                                     vni, ifindex, ndm_flags, &f);
+               if (rc < 0)
                        return rc;
-               }
-
-               ++vxlan->addrcnt;
-               hlist_add_head_rcu(&f->hlist,
-                                  vxlan_fdb_head(vxlan, mac, src_vni));
+               notify = 1;
        }
 
        if (notify) {
@@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
        kfree(f);
 }
 
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                             bool do_notify)
 {
        netdev_dbg(vxlan->dev,
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+       if (do_notify)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                return -EAFNOSUPPORT;
 
        spin_lock_bh(&vxlan->hash_lock);
-       err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+       err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
                               port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);
 
@@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                goto out;
        }
 
-       vxlan_fdb_destroy(vxlan, f);
+       vxlan_fdb_destroy(vxlan, f, true);
 
 out:
        return 0;
@@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                /* close off race between vxlan_flush and incoming packets */
                if (netif_running(dev))
-                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                       vxlan_fdb_update(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
@@ -2366,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
@@ -2417,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
        spin_lock_bh(&vxlan->hash_lock);
        f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
        if (f)
-               vxlan_fdb_destroy(vxlan, f);
+               vxlan_fdb_destroy(vxlan, f, true);
        spin_unlock_bh(&vxlan->hash_lock);
 }
 
@@ -2471,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
                                continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                       &vxlan->default_dst.remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
                                       vxlan->cfg.dst_port,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
+                                      NTF_SELF, &f);
                if (err)
                        return err;
        }
 
        err = register_netdevice(dev);
+       if (err)
+               goto errout;
+
+       err = rtnl_configure_link(dev, NULL);
        if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
+               unregister_netdevice(dev);
+               goto errout;
        }
 
+       /* notify default fdb entry */
+       if (f)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+errout:
+       if (f)
+               vxlan_fdb_destroy(vxlan, f, false);
+       return err;
 }
 
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct vxlan_rdst *dst = &vxlan->default_dst;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                        err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
-                                              NLM_F_CREATE | NLM_F_APPEND,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF);
+                                              NTF_SELF, &f);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
+                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
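The vxlan refactor above splits the old vxlan_fdb_create() into a pure constructor (allocate, append the remote, hash the entry in, return it through **fdb) and vxlan_fdb_update(), which keeps the netlink create/replace semantics. __vxlan_dev_create() can then defer the RTM_NEWNEIGH notification until register_netdevice() and rtnl_configure_link() have both succeeded, and destroy the default entry without a notification on the error path. A sketch of that create/notify split, using hypothetical types:

	#include <stdio.h>
	#include <stdlib.h>

	struct entry { int notified; };

	/* Constructor only: no userspace notification here. */
	static int entry_create(struct entry **out)
	{
		*out = calloc(1, sizeof(**out));
		return *out ? 0 : -1;
	}

	static void entry_notify(struct entry *e) { e->notified = 1; }

	static void entry_destroy(struct entry *e, int do_notify)
	{
		if (do_notify)
			printf("RTM_DELNEIGH\n");   /* skipped on the error path */
		free(e);
	}

	static int dev_create(int register_fails)
	{
		struct entry *e = NULL;

		if (entry_create(&e))
			return -1;
		if (register_fails) {
			entry_destroy(e, 0);   /* never announced, so no DELNEIGH */
			return -1;
		}
		entry_notify(e);               /* announce only after full success */
		entry_destroy(e, 1);
		return 0;
	}

	int main(void)
	{
		printf("failure path: %d\n", dev_create(1));
		printf("success path: %d\n", dev_create(0));
		return 0;
	}

Userspace never sees a NEWNEIGH for an entry whose device failed to register, so it never needs a matching DELNEIGH either.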
index 90a4ad9a2d081eb582570476a41a55f71e911dd6..b3a1b6f5c40648f755df770b0288627587e2ff66 100644 (file)
@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
             case 0x001:
                 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                 break;
-            case 0x010:
+            case 0x002:
                 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                 break;
             default:
index e9c2fb318c03362d84031241a4191db9f4602c1a..836e0a47b94a0a192b210620d6652c41145cbab1 100644 (file)
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                           ath10k_mac_max_vht_nss(vht_mcs_mask)));
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
-                          sta->addr, bw);
+               enum wmi_phy_mode mode;
+
+               mode = chan_to_phymode(&def);
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+                               sta->addr, bw, mode);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                               WMI_PEER_PHYMODE, mode);
+               if (err) {
+                       ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+                                       sta->addr, mode, err);
+                       goto exit;
+               }
 
                err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
                                                WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                                    sta->addr);
        }
 
+exit:
        mutex_unlock(&ar->conf_mutex);
 }
 
index b48db54e986516271daab61f30277815a7a9bf6d..d68afb65402a069528b0dc3a01eab142c833898c 100644 (file)
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
        WMI_PEER_NSS        = 0x5,
        WMI_PEER_USE_4ADDR  = 0x6,
        WMI_PEER_DEBUG      = 0xa,
+       WMI_PEER_PHYMODE    = 0xd,
        WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
 
index 1279064a3b716c2ef6cf82d82c27ea664f1496b4..51a038022c8b80404b9bd841c6fefd3b866ffe66 100644 (file)
@@ -1,4 +1,4 @@
-/*
+/*
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
index 9d99eb42d9176f0f833048b3f87a906542c9e90c..6acba67bca07abd7d662466b4422295dff359a33 100644 (file)
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
-       depends on HAS_DMA
        select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
index 45928b5b8d97c3217d8f0d13206c51e64f448a1c..4fffa6988087b8bca0d6c834bb62a75d4a422c02 100644 (file)
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
        fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
        fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
        fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
-       fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus);
+       /* NVRAM reserves PCI domain 0 for Broadcom's SDK fake bus */
+       fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
        fwreq->bus_nr = devinfo->pdev->bus->number;
 
        return fwreq;
index c99a191e8d693a3e6ef006826fcde5affb74a02d..a907d7b065fa8e0b7ab6a35dc2c265a6385d9c75 100644 (file)
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
        brcmf_dbg(TRACE, "Enter\n");
 
        if (bus) {
+               /* Stop watchdog task */
+               if (bus->watchdog_tsk) {
+                       send_sig(SIGTERM, bus->watchdog_tsk, 1);
+                       kthread_stop(bus->watchdog_tsk);
+                       bus->watchdog_tsk = NULL;
+               }
+
                /* De-register interrupt handler */
                brcmf_sdiod_intr_unregister(bus->sdiodev);
 
index e20c30b29c03faa4dd8e80a6ef8f43c72d878fa4..c8ea63d02619c345a8260a316665a40f03ac391c 100644 (file)
@@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
        .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
+const struct iwl_cfg iwl9260_killer_2ac_cfg = {
+       .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
 const struct iwl_cfg iwl9270_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9270",
        .fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
        .soc_latency = 5000,
 };
 
+const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+};
+
 const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
        .name = "Intel(R) Dual Band Wireless AC 9460",
        .fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
        .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
 };
 
+const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+       .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+       .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
 MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index c503b26793f6dd77fb63da69c7a838c059eb12be..84a81680972351180bfc1e48f361ede22050280b 100644 (file)
@@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
index 38234bda901783c8fe533cb21f862f800910f53a..8520523b91b40b5a0f6a00b0fe02c8ba33254aa4 100644 (file)
@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
+       {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
index 6e3cf9817730b53f751f31401425ffd1c1dc82a4..88f4c89f89ba85f5ff64085f8f29abf20722d5ea 100644 (file)
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
                                         MWIFIEX_FUNC_SHUTDOWN);
        }
 
-       if (adapter->workqueue)
-               flush_workqueue(adapter->workqueue);
-
-       mwifiex_usb_free(card);
-
        mwifiex_dbg(adapter, FATAL,
                    "%s: removing card\n", __func__);
        mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
 
+       mwifiex_usb_free(card);
+
        mwifiex_usb_cleanup_tx_aggr(adapter);
 
        card->adapter = NULL;
index 9d2f9a776ef18e405c80e73c40c9eb3bc137b50d..b804abd464ae06365adbe108a5706412efe53f4b 100644 (file)
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
         */
        spin_lock_bh(&dev->con_mon_lock);
        avg_rssi = ewma_rssi_read(&dev->avg_rssi);
-       WARN_ON_ONCE(avg_rssi == 0);
+       spin_unlock_bh(&dev->con_mon_lock);
+       if (avg_rssi == 0)
+               return;
+
        avg_rssi = -avg_rssi;
        if (avg_rssi <= -70)
                val -= 0x20;
        else if (avg_rssi <= -60)
                val -= 0x10;
-       spin_unlock_bh(&dev->con_mon_lock);
 
        if (val != mt7601u_bbp_rr(dev, 66))
                mt7601u_bbp_wr(dev, 66, val);
index 025fa6018550895ae529c7222d9595a1fb621748..8d1492a90bd135c09213f05d52ff85682a80de71 100644 (file)
@@ -7,7 +7,7 @@ config QTNFMAC
 config QTNFMAC_PEARL_PCIE
        tristate "Quantenna QSR10g PCIe support"
        default n
-       depends on HAS_DMA && PCI && CFG80211
+       depends on PCI && CFG80211
        select QTNFMAC
        select FW_LOADER
        select CRC32
index 220e2b71020859163cc4affc71f505648561b151..ae0ca800684950e65ecc01916f4782af54a8e0eb 100644 (file)
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
        vif = qtnf_mac_get_base_vif(mac);
        if (!vif) {
                pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
-               ret = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
 
        if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
index 39c817eddd78e9cf736fbbd440c6617867afad20..54c9f6ab0c8cadb483d10413783b45b401c6f6f4 100644 (file)
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
 
 }
 
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        del_timer_sync(&rtlpriv->works.watchdog_timer);
 
-       cancel_delayed_work(&rtlpriv->works.watchdog_wq);
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
-       cancel_delayed_work(&rtlpriv->works.ps_work);
-       cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
-       cancel_delayed_work(&rtlpriv->works.fwevt_wq);
-       cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+       if (ips_wq)
+               cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       else
+               cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
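
The switch above from cancel_delayed_work() to cancel_delayed_work_sync() is deliberate about one exception: the sync variant waits for a running instance of the work to finish, so calling it from inside the very work being cancelled would self-deadlock. That is what the new ips_wq flag encodes. A hedged sketch of the rule (names hypothetical):

#include <linux/workqueue.h>

static void stop_deferred_work_sketch(struct delayed_work *ips_work,
				      bool called_from_ips_work)
{
	if (called_from_ips_work)
		cancel_delayed_work(ips_work);	    /* can't wait on ourselves */
	else
		cancel_delayed_work_sync(ips_work); /* wait until it is idle */
}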
 
index 912f205779c39e68387269825fdae5c4fcaa206f..a7ae40eaa3cd538f96622e4e9a53da3c2b13ccec 100644 (file)
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
index cfea57efa7f43c6bc1c6e99a45978053a1eed2ba..4bf7967590ca7be3b9b452d0b0ec65d98b829599 100644 (file)
@@ -130,7 +130,6 @@ found_alt:
                       firmware->size);
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
-       rtlpriv->rtlhal.fwsize = firmware->size;
        release_firmware(firmware);
 }
 
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
                /* reset sec info */
                rtl_cam_reset_sec_info(hw);
 
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
        }
        rtlpriv->intf_ops->adapter_stop(hw);
 
index ae13bcfb3bf09cc142a81c9ab78c31ad80fd035e..5d1fda16fc8c4c966ff8e24ca8d49bd3b6ca38c6 100644 (file)
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        rtlpriv->cfg->ops->disable_interrupt(hw);
index 71af24e2e05197a344dd549c94d6f8ecba7ce00f..479a4cfc245d349e105457845719ca79b7ebb10d 100644 (file)
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        /*<1> Stop all timer */
-       rtl_deinit_deferred_work(hw);
+       rtl_deinit_deferred_work(hw, true);
 
        /*<2> Disable Interrupt */
        rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        enum rf_pwrstate rtstate;
 
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
 
        mutex_lock(&rtlpriv->locks.ips_mutex);
        if (ppsc->inactiveps) {
index f9faffc498bcbd2d94cad365814955f9b1347759..2ac5004d7a401ab5d1255126c5c0a00a5e233705 100644 (file)
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        /*deinit rfkill */
index 922ce0abf5cf105a5394285b07356ebcad055d78..9dd2ca62d84af79b82fb59111d471b09658cfe57 100644 (file)
@@ -87,6 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
 static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
 
 struct netfront_stats {
@@ -893,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
 {
-       struct skb_shared_info *shinfo = skb_shinfo(skb);
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
 
@@ -902,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        RING_GET_RESPONSE(&queue->rx, ++cons);
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-               if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+               if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-               skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               skb_frag_page(nfrag),
                                rx->offset, rx->status, PAGE_SIZE);
 
                skb_shinfo(nskb)->nr_frags = 0;
@@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netif_carrier_off(netdev);
 
        xenbus_switch_state(dev, XenbusStateInitialising);
+       wait_event(module_load_q,
+                          xenbus_read_driver_state(dev->otherend) !=
+                          XenbusStateClosed &&
+                          xenbus_read_driver_state(dev->otherend) !=
+                          XenbusStateUnknown);
        return netdev;
 
  exit:
@@ -1810,7 +1816,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        err = xen_net_read_mac(dev, info->netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-               goto out;
+               goto out_unlocked;
        }
 
        rtnl_lock();
@@ -1925,6 +1931,7 @@ abort_transaction_no_dev_fatal:
        xennet_destroy_queues(info);
  out:
        rtnl_unlock();
+out_unlocked:
        device_unregister(&dev->dev);
        return err;
 }
@@ -1950,10 +1957,6 @@ static int xennet_connect(struct net_device *dev)
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
 
-       rtnl_lock();
-       netdev_update_features(dev);
-       rtnl_unlock();
-
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                err = register_netdev(dev);
                if (err) {
@@ -1963,6 +1966,10 @@ static int xennet_connect(struct net_device *dev)
                }
        }
 
+       rtnl_lock();
+       netdev_update_features(dev);
+       rtnl_unlock();
+
        /*
         * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
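
Two patterns are worth pulling out of the xen-netfront changes above. First, the xennet_fill_frags() hunk stops caching skb_shinfo(skb) in a local: __pskb_pull_tail() may reallocate the skb's data, after which a previously saved shared-info pointer is stale, so skb_shinfo(skb) must be re-read after any call that can resize the skb. Second, the new out_unlocked label exists because the first failure happens before rtnl_lock() is taken; an unwind path must only unlock what the failing point actually holds. A sketch of the latter (read_config(), do_setup() and cleanup_sketch() are placeholders, not driver functions):

#include <linux/device.h>
#include <linux/rtnetlink.h>

static int read_config(struct device *dev);
static int do_setup(struct device *dev);
static void cleanup_sketch(struct device *dev);

static int talk_to_backend_sketch(struct device *dev)
{
	int err;

	err = read_config(dev);		/* no lock held yet */
	if (err)
		goto out_unlocked;

	rtnl_lock();
	err = do_setup(dev);
	if (err)
		goto out;		/* must drop the lock first */
	rtnl_unlock();
	return 0;

out:
	rtnl_unlock();
out_unlocked:
	cleanup_sketch(dev);		/* common, lock-free teardown */
	return err;
}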
index d5553c47014fade81a4f461903b3cb6c4372ccf5..5d823e965883b0f5f23db5ab39afc9f96a128267 100644 (file)
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
        struct sk_buff *skb = NULL;
 
        if (!urb->status) {
-               skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+               skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
                if (!skb) {
                        nfc_err(&phy->udev->dev, "failed to alloc memory\n");
                } else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
 
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
-               rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+               rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
                if (rc)
                        goto error;
        } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
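
The two GFP changes above move each allocation to the flag its context requires: a URB completion handler runs in atomic context and must not sleep, so it needs GFP_ATOMIC, while pn533_usb_send_frame() runs in process context, where GFP_KERNEL is preferred because it can reclaim memory instead of failing under pressure. A sketch of the rule (helper names are illustrative):

#include <linux/skbuff.h>

/* completion callback: atomic context, sleeping forbidden */
static void recv_complete_sketch(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;		/* atomic allocations may fail; handle it */
	/* ... copy payload, queue for processing ... */
	kfree_skb(skb);
}

/* process context: GFP_KERNEL may sleep and reclaim, so prefer it */
static struct sk_buff *prealloc_sketch(unsigned int len)
{
	return alloc_skb(len, GFP_KERNEL);
}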
index a59b6c4bb5b89444139ae3907deb73c9947147d0..ad3d17c42e23ccef936430210b36e9e649034e01 100644 (file)
@@ -5,6 +5,7 @@
 // Copyright (C) 2017 Finn Thain
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/nubus.h>
 #include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
        board->dev.release = nubus_device_release;
        board->dev.bus = &nubus_bus_type;
        dev_set_name(&board->dev, "slot.%X", board->slot);
+       board->dev.dma_mask = &board->dev.coherent_dma_mask;
+       dma_set_mask(&board->dev, DMA_BIT_MASK(32));
        return device_register(&board->dev);
 }
 
index 2e96b34bc936bf89f6a9a65d983e4bbf3a673fbd..fb667bf469c7e980411c2836d4a9b97d1f96a53d 100644 (file)
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
                        return -EIO;
                if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
                        return -EIO;
+               return 0;
        }
 
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
index 68940356cad3f100f4cfbdd325d42235ea3c5da4..8b1fd7f1a224eedebf08cddfe2258949c50a6bcf 100644 (file)
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       if (pmem->pfn_flags & PFN_MAP)
+               blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
index 21710a7460c823bbc4f84134d7ecce70d3f993ba..bf65501e6ed634a9e60c8c142955a16a60630159 100644 (file)
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                          unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+       /*
+        * Revalidating a dead namespace sets capacity to 0. This will end
+        * buffered writers dirtying pages that can't be synced.
+        */
+       if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+               return;
+       revalidate_disk(ns->disk);
+       blk_set_queue_dying(ns->queue);
+       /* Forcibly unquiesce queues to avoid blocking dispatch */
+       blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-       u32 result;
+       u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
        int status;
 
-       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-                       ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+       if (!supported_aens)
+               return;
+
+       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+                       NULL, 0, &result);
        if (status)
                dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-                        ctrl->oaes & NVME_AEN_SUPPORTED);
+                        supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-       struct nvme_ns *ns, *next;
-       LIST_HEAD(rm_list);
+       struct nvme_ns *ns;
 
-       down_write(&ctrl->namespaces_rwsem);
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                       list_move_tail(&ns->list, &rm_list);
-               }
-       }
-       up_write(&ctrl->namespaces_rwsem);
+       down_read(&ctrl->namespaces_rwsem);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               if (ns->disk && nvme_revalidate_disk(ns->disk))
+                       nvme_set_queue_dying(ns);
+       up_read(&ctrl->namespaces_rwsem);
 
-       list_for_each_entry_safe(ns, next, &rm_list, list)
-               nvme_ns_remove(ns);
+       nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
                        0, &cmd.result, timeout);
        nvme_passthru_end(ctrl, effects);
 
@@ -1808,6 +1823,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                u32 max_segments =
                        (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+               max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
@@ -3137,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
        down_write(&ctrl->namespaces_rwsem);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-               if (ns->head->ns_id > nsid)
+               if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
                        list_move_tail(&ns->list, &rm_list);
        }
        up_write(&ctrl->namespaces_rwsem);
@@ -3541,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        if (ctrl->admin_q)
                blk_mq_unquiesce_queue(ctrl->admin_q);
 
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               /*
-                * Revalidating a dead namespace sets capacity to 0. This will
-                * end buffered writers dirtying pages that can't be synced.
-                */
-               if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                       continue;
-               revalidate_disk(ns->disk);
-               blk_set_queue_dying(ns->queue);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               nvme_set_queue_dying(ns);
 
-               /* Forcibly unquiesce queues to avoid blocking dispatch */
-               blk_mq_unquiesce_queue(ns->queue);
-       }
        up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
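
The new nvme_set_queue_dying() helper above deduplicates the teardown that nvme_kill_queues() and nvme_update_formats() both need, and its test_and_set_bit() guard makes it idempotent: whichever path reaches a namespace first performs the one-time work, and later callers return immediately. A sketch of the guard in isolation (NS_DEAD_SKETCH is an illustrative bit, not the driver's flag):

#include <linux/bitops.h>

#define NS_DEAD_SKETCH	0

static void mark_dead_once_sketch(unsigned long *flags)
{
	if (test_and_set_bit(NS_DEAD_SKETCH, flags))
		return;		/* another path already did the teardown */
	/* ... one-time work: revalidate, mark queue dying, unquiesce ... */
}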
index 903eb4545e2699bc1b62365e5ca4490e824a8c5c..f7efe5a58cc7c2f11163e9bb9c9319a08b3eb1eb 100644 (file)
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered.  However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq)
 {
-       if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+       if (ctrl->state != NVME_CTRL_DELETING &&
+           ctrl->state != NVME_CTRL_DEAD &&
+           !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                return BLK_STS_RESOURCE;
        nvme_req(rq)->status = NVME_SC_ABORT_REQ;
        return BLK_STS_IOERR;
index e1818a27aa2d7bcf75ff0e2c4522a61e294d8d9b..aa2fdb2a2e8fc0143b59ff48692284ba50c8225f 100644 (file)
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live);
 
index b528a2f5826cbfe19b22aadd7e09e1ceff512cb6..9bac912173ba37811545d036fa518ee882971f5a 100644 (file)
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
            !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
        /* re-enable the admin_q so anything new can fast fail */
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
+       /* resume the io queues so that things will fast fail */
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
         * waiting for io to terminate
         */
        nvme_fc_delete_association(ctrl);
-
-       /* resume the io queues so that things will fast fail */
-       nvme_start_queues(nctrl);
 }
 
 static void
index 231807cbc849869afcbc16fce2e3389539ce2684..0c4a33df3b2f3bb9d8a710556dab3f8c55ed2302 100644 (file)
@@ -170,6 +170,7 @@ struct nvme_ctrl {
        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
+       u32 max_segments;
        u16 oncs;
        u16 oacs;
        u16 nssa;
index fc33804662e7bd35cfbacd93a26101bf23b3f43d..ddd441b1516aff50a8eb919d123d7103e18e69d3 100644 (file)
 
 #define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS  127
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -100,6 +107,8 @@ struct nvme_dev {
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
 
+       mempool_t *iod_mempool;
+
        /* shadow doorbell buffer support: */
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->use_sgl = nvme_pci_use_sgls(dev, rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
-               size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
-                               iod->use_sgl);
-
-               iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+               iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
                if (!iod->sg)
                        return BLK_STS_RESOURCE;
        } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
        }
 
        if (iod->sg != iod->inline_sg)
-               kfree(iod->sg);
+               mempool_free(iod->sg, dev->iod_mempool);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
        free_opal_dev(dev->ctrl.opal_dev);
+       mempool_destroy(dev->iod_mempool);
        kfree(dev);
 }
 
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
+       nvme_kill_queues(&dev->ctrl);
        if (!queue_work(nvme_wq, &dev->remove_work))
                nvme_put_ctrl(&dev->ctrl);
 }
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out;
 
+       /*
+        * Limit the max command size to prevent iod->sg allocations going
+        * over a single page.
+        */
+       dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+       dev->ctrl.max_segments = NVME_MAX_SEGS;
+
        result = nvme_init_identify(&dev->ctrl);
        if (result)
                goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                device_release_driver(&pdev->dev);
        nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
        unsigned long quirks = id->driver_data;
+       size_t alloc_size;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -2541,10 +2556,27 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        quirks |= check_vendor_combination_bug(pdev);
 
+       /*
+        * Double check that our mempool alloc size will cover the biggest
+        * command we support.
+        */
+       alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+                                               NVME_MAX_SEGS, true);
+       WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+       dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+                                               mempool_kfree,
+                                               (void *) alloc_size,
+                                               GFP_KERNEL, node);
+       if (!dev->iod_mempool) {
+               result = -ENOMEM;
+               goto release_pools;
+       }
+
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                        quirks);
        if (result)
-               goto release_pools;
+               goto release_mempool;
 
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
@@ -2553,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+ release_mempool:
+       mempool_destroy(dev->iod_mempool);
  release_pools:
        nvme_release_prp_pools(dev);
  unmap:
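
The mempool conversion above is about forward progress: kmalloc(GFP_ATOMIC) in the I/O path can fail outright under memory pressure, while a mempool pre-reserves its minimum element count at probe time so an allocation can always fall back to the reserve. Capping max_hw_sectors and max_segments keeps that worst-case element within a single page. A sketch of the lifecycle (alloc_size stands in for the driver's computed worst case):

#include <linux/mempool.h>

static int iod_pool_create_sketch(mempool_t **pool, size_t alloc_size,
				  int node)
{
	/* one reserved element guarantees progress for one request */
	*pool = mempool_create_node(1, mempool_kmalloc, mempool_kfree,
				    (void *)alloc_size, GFP_KERNEL, node);
	return *pool ? 0 : -ENOMEM;
}

static void *iod_sg_alloc_sketch(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_ATOMIC);	/* may use the reserve */
}

static void iod_sg_free_sketch(mempool_t *pool, void *sg)
{
	mempool_free(sg, pool);		/* refills the reserve first */
}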
index c9424da0d23e3cbbdd0e2b5209d9eddca9f1591f..66ec5985c9f3a9f9f2176cf14c47e222b1c59cbe 100644 (file)
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       if (nvme_rdma_queue_idx(queue) == 0) {
-               nvme_rdma_free_qe(queue->device->dev,
-                       &queue->ctrl->async_event_sqe,
-                       sizeof(struct nvme_command), DMA_TO_DEVICE);
-       }
-
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_mq_ops;
-               set->queue_depth = nctrl->opts->queue_size;
+               set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
        }
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                               sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
        nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
+       error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+       if (error)
+               goto out_free_queue;
+
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                if (IS_ERR(ctrl->ctrl.admin_tagset)) {
                        error = PTR_ERR(ctrl->ctrl.admin_tagset);
-                       goto out_free_queue;
+                       goto out_free_async_qe;
                }
 
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       if (error)
-               goto out_stop_queue;
-
        return 0;
 
 out_stop_queue:
@@ -811,6 +808,9 @@ out_cleanup_queue:
 out_free_tagset:
        if (new)
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+               sizeof(struct nvme_command), DMA_TO_DEVICE);
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
@@ -819,7 +819,6 @@ out_free_queue:
 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
+       kfree(ctrl->queues);
        kfree(ctrl);
 }
 
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        return;
 
 destroy_admin:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, false);
        }
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1637,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        WARN_ON_ONCE(rq->tag < 0);
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
                nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_free_ctrl;
        }
 
-       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-                               0 /* no quirks, we're perfect! */);
-       if (ret)
-               goto out_free_ctrl;
-
        INIT_DELAYED_WORK(&ctrl->reconnect_work,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues)
-               goto out_uninit_ctrl;
+               goto out_free_ctrl;
+
+       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+                               0 /* no quirks, we're perfect! */);
+       if (ret)
+               goto out_kfree_queues;
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
        WARN_ON_ONCE(!changed);
 
        ret = nvme_rdma_configure_admin_queue(ctrl, true);
        if (ret)
-               goto out_kfree_queues;
+               goto out_uninit_ctrl;
 
        /* sanity check icdoff */
        if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_remove_admin_queue;
        }
 
-       if (opts->queue_size > ctrl->ctrl.maxcmd) {
-               /* warn if maxcmd is lower than queue_size */
-               dev_warn(ctrl->ctrl.device,
-                       "queue_size %zu > ctrl maxcmd %u, clamping down\n",
-                       opts->queue_size, ctrl->ctrl.maxcmd);
-               opts->queue_size = ctrl->ctrl.maxcmd;
-       }
-
+       /* only warn if argument is too large here, will clamp later */
        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-               /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
-               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
+
+       /* warn if maxcmd is lower than sqsize+1 */
+       if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+               dev_warn(ctrl->ctrl.device,
+                       "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                       ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+               ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
        }
 
        if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        return &ctrl->ctrl;
 
 out_remove_admin_queue:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-       kfree(ctrl->queues);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
+out_kfree_queues:
+       kfree(ctrl->queues);
 out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
index d3f3b3ec4d1afaf3d7ed3626f3211ccba54c87e4..ebea1373d1b7af0fa7bfb2a8675b64f10239c59d 100644 (file)
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
+       size_t len;
        int ret;
 
        mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
        if (ns->enabled)
                goto out_unlock;
 
-       kfree(ns->device_path);
+       ret = -EINVAL;
+       len = strcspn(page, "\n");
+       if (!len)
+               goto out_unlock;
 
+       kfree(ns->device_path);
        ret = -ENOMEM;
-       ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+       ns->device_path = kstrndup(page, len, GFP_KERNEL);
        if (!ns->device_path)
                goto out_unlock;
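
The reordering above validates the new path before discarding the old one: an empty or all-newline write now returns -EINVAL with the previous device_path intact, instead of freeing it first and only then failing. A slightly stricter variant of the same idea, which also preserves the old string if the duplication fails, might look like:

#include <linux/slab.h>
#include <linux/string.h>

static int set_path_sketch(char **slot, const char *page)
{
	size_t len = strcspn(page, "\n");	/* length up to newline */
	char *new;

	if (!len)
		return -EINVAL;			/* old value untouched */

	new = kstrndup(page, len, GFP_KERNEL);
	if (!new)
		return -ENOMEM;			/* old value untouched */

	kfree(*slot);				/* only now replace it */
	*slot = new;
	return 0;
}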
 
index a03da764ecae8cb3ec9bf0bb9c68971669b895fc..9838103f2d629b5490b12177693bed04c130e353 100644 (file)
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
                goto out_unlock;
 
        ret = nvmet_bdev_ns_enable(ns);
-       if (ret)
+       if (ret == -ENOTBLK)
                ret = nvmet_file_ns_enable(ns);
        if (ret)
                goto out_unlock;
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
        }
 
        ctrl->csts = NVME_CSTS_RDY;
+
+       /*
+        * Controllers that are not yet enabled should not really enforce the
+        * keep alive timeout, but we still want to track a timeout and cleanup
+        * in case a host died before it enabled the controller.  Hence, simply
+        * reset the keep alive timer when the controller is enabled.
+        */
+       mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
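
The mod_delayed_work() call above is the right primitive for "reset the keep-alive timer": unlike queue_delayed_work(), it re-arms the work with the new delay even when the item is already pending. In sketch form:

#include <linux/workqueue.h>

static void reset_keep_alive_sketch(struct delayed_work *ka_work,
				    unsigned int kato_secs)
{
	/* re-arms whether or not the work was already queued */
	mod_delayed_work(system_wq, ka_work, kato_secs * HZ);
}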
index 408279cb6f2c8041886b4eedaecf8eba9d8aadf5..29b4b236afd85fc7dc668d4ed60f66a093174d17 100644 (file)
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
        struct work_struct              work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH                (256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS                (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
        NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
        struct nvme_fc_cmd_iu           cmdiubuf;
        struct nvme_fc_ersp_iu          rspiubuf;
        dma_addr_t                      rspdma;
+       struct scatterlist              *next_sg;
        struct scatterlist              *data_sg;
        int                             data_sg_cnt;
        u32                             offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        INIT_LIST_HEAD(&newrec->assoc_list);
        kref_init(&newrec->ref);
        ida_init(&newrec->assoc_cnt);
-       newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-                                       template->max_sgl_segments);
+       newrec->max_sg_cnt = template->max_sgl_segments;
 
        ret = nvmet_fc_alloc_ls_iodlist(newrec);
        if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
                                ((fod->io_dir == NVMET_FCP_WRITE) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE));
                                /* note: write from initiator perspective */
+       fod->next_sg = fod->data_sg;
 
        return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_fcp_iod *fod, u8 op)
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+       struct scatterlist *sg = fod->next_sg;
        unsigned long flags;
-       u32 tlen;
+       u32 remaininglen = fod->req.transfer_len - fod->offset;
+       u32 tlen = 0;
        int ret;
 
        fcpreq->op = op;
        fcpreq->offset = fod->offset;
        fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-       tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-                       (fod->req.transfer_len - fod->offset));
+       /*
+        * for next sequence:
+        *  break at a sg element boundary
+        *  attempt to keep sequence length capped at
+        *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+        *    be longer if a single sg element is larger
+        *    than that amount. This is done to avoid creating
+        *    a new sg list to use for the tgtport api.
+        */
+       fcpreq->sg = sg;
+       fcpreq->sg_cnt = 0;
+       while (tlen < remaininglen &&
+              fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+              tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+               fcpreq->sg_cnt++;
+               tlen += sg_dma_len(sg);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+               fcpreq->sg_cnt++;
+               tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen)
+               fod->next_sg = sg;
+       else
+               fod->next_sg = NULL;
+
        fcpreq->transfer_length = tlen;
        fcpreq->transferred_length = 0;
        fcpreq->fcp_error = 0;
        fcpreq->rsplen = 0;
 
-       fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-       fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
        /*
         * If the last READDATA request: check if LLDD supports
         * combined xfr with response.
index d8d91f04bd7eedae3e183c3a89dc3d42bd33a3ff..ae7586b8be07b6310a267217642f33147177eb98 100644 (file)
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-               return nvmf_fail_nonready_command(req);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
index b5b0cdc21d01b4cd940e4fddb6f06976258e782d..514d1dfc563059684d4fbc6a3e8b7aa8bd07da64 100644 (file)
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
                        return cell;
        }
 
+       /* NULL cell_id only allowed for device tree; invalid otherwise */
+       if (!cell_id)
+               return ERR_PTR(-EINVAL);
+
        return nvmem_cell_get_from_list(cell_id);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_get);
index 848f549164cd0434ae8a28210d47b9ade2b42de3..466e3c8582f0fd62628b90872b2046971e064776 100644 (file)
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
 {
        unsigned long flags;
        u32 cache_entries;
@@ -134,8 +134,7 @@ out:
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
 {
        unsigned long flags;
 
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
 
        return 0;
 }
+#if !defined(CONFIG_MODULES)
 late_initcall_sync(of_free_phandle_cache);
 #endif
 
index 891d780c076a12d14dde8d50b87d940e9c83707e..216175d11d3dc2ca3fdfaa429306ecf50218a01a 100644 (file)
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
index 7baa53e5b1d74d469959341945e3bc239cf7d5c7..eda57ef12fd057b3d92c750dec983703e85f38e3 100644 (file)
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
                goto err_free_overlay_changeset;
        }
 
+       of_populate_phandle_cache();
+
        ret = __of_changeset_apply_notify(&ovcs->cset);
        if (ret)
                pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
 
        list_del(&ovcs->ovcs_list);
 
+       /*
+        * Disable phandle cache.  Avoids race condition that would arise
+        * from removing cache entry when the associated node is deleted.
+        */
+       of_free_phandle_cache();
+
        ret_apply = 0;
        ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+       of_populate_phandle_cache();
+
        if (ret) {
                if (ret_apply)
                        devicetree_state_flags |= DTSF_REVERT_FAIL;
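
The overlay-remove hunk brackets the destructive step with the newly exported cache helpers: the phandle cache is freed before nodes are removed, so no lookup can hit a cache entry whose node has just been deleted, and it is rebuilt once the tree is stable again. The shape of the bracket, sketched:

static void revert_overlay_sketch(void)
{
	of_free_phandle_cache();	/* no stale entries during removal */
	/* ... revert the changeset, deleting nodes ... */
	of_populate_phandle_cache();	/* rebuild from the final tree */
}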
index ab2f3fead6b1ceee55b0ced767dafdacb1202b35..31ff03dbeb83771be1ba57fde89ea6c32f63c2aa 100644 (file)
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
        }
 
        /* Scaling up? Scale voltage before frequency */
-       if (freq > old_freq) {
+       if (freq >= old_freq) {
                ret = _set_opp_voltage(dev, reg, new_supply);
                if (ret)
                        goto restore_voltage;
index 535201984b8b0c5c0c58d585528b7593bbcf61be..1b2cfe51e8d719ce6cd6976fbaad485cc8143fd8 100644 (file)
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB)   += pci-pf-stub.o
 obj-$(CONFIG_PCI_ECAM)         += ecam.o
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 
-obj-y                          += controller/
-obj-y                          += switch/
-
 # Endpoint library must be initialized before its users
 obj-$(CONFIG_PCI_ENDPOINT)     += endpoint/
 
+obj-y                          += controller/
+obj-y                          += switch/
+
 ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
index 35b7fc87eac506faf88ab3948e136195adfec823..5cb40b2518f9376dbe7edd4bf11d303daf97c025 100644 (file)
@@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev)
                return;
        }
 
-       dev->is_added = 1;
+       pci_dev_assign_added(dev, true);
 }
 EXPORT_SYMBOL_GPL(pci_bus_add_device);
 
@@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus)
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Skip already-added devices */
-               if (dev->is_added)
+               if (pci_dev_is_added(dev))
                        continue;
                pci_bus_add_device(dev);
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Skip if device attach failed */
-               if (!dev->is_added)
+               if (!pci_dev_is_added(dev))
                        continue;
                child = dev->subordinate;
                if (child)
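
Replacing the is_added bitfield with pci_dev_is_added()/pci_dev_assign_added() lets the flag live in a word updated with atomic bitops, so concurrent writers to neighbouring bits can no longer corrupt it the way bitfield read-modify-write sequences can. A sketch of accessors in that style (the bit number and flags word here are illustrative):

#include <linux/bitops.h>

#define DEV_ADDED_SKETCH	0

static inline void dev_assign_added_sketch(unsigned long *priv_flags,
					   bool added)
{
	if (added)
		set_bit(DEV_ADDED_SKETCH, priv_flags);	/* atomic RMW */
	else
		clear_bit(DEV_ADDED_SKETCH, priv_flags);
}

static inline bool dev_is_added_sketch(const unsigned long *priv_flags)
{
	return test_bit(DEV_ADDED_SKETCH, priv_flags);
}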
index 18fa09b3ac8f2c377ccd8e9ba890f01d66b3c367..cc9fa02d32a08e6051e2adbcee288b6b4f629f14 100644 (file)
@@ -96,7 +96,6 @@ config PCI_HOST_GENERIC
        depends on OF
        select PCI_HOST_COMMON
        select IRQ_DOMAIN
-       select PCI_DOMAINS
        help
          Say Y here if you want to support a simple generic PCI host
          controller, such as the one emulated by kvmtool.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
 
 config PCIE_IPROC
        tristate
-       select PCI_DOMAINS
        help
          This enables the iProc PCIe core controller support for Broadcom's
          iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
 config PCIE_ALTERA
        bool "Altera PCIe controller"
        depends on ARM || NIOS2 || COMPILE_TEST
-       select PCI_DOMAINS
        help
          Say Y here if you want to enable PCIe controller support on Altera
          FPGA.
index 16f52c626b4bd5101afd9c517b48dc6ca014f082..91b0194240a57e8f1d1d78ab682f20135002a68d 100644 (file)
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
        depends on PCI && PCI_MSI_IRQ_DOMAIN
        select PCIE_DW_HOST
        select PCIE_DW_PLAT
-       default y
        help
          Enables support for the PCIe controller in the Designware IP to
          work in host mode. There are two instances of PCIe controller in
index 781aa03aeede34adbad23fa6b37b0d2275d00458..29a05759a29421aab29efb6026ffec9d083abef1 100644 (file)
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
                switch (resource_type(win->res)) {
                case IORESOURCE_IO:
-                       ret = pci_remap_iospace(win->res, pp->io_base);
+                       ret = devm_pci_remap_iospace(dev, win->res,
+                                                    pp->io_base);
                        if (ret) {
                                dev_warn(dev, "Error %d: failed to map resource %pR\n",
                                         ret, win->res);
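
This and the following pci_remap_iospace() to devm_pci_remap_iospace() conversions all make the same trade: the managed variant ties the I/O-space mapping to the device, so the devres core unmaps it automatically on probe failure or unbind, and none of the later error paths needs an explicit pci_unmap_iospace(). Sketched:

#include <linux/pci.h>

static int map_io_window_sketch(struct device *dev, struct resource *res,
				phys_addr_t io_base)
{
	/* unmapped automatically when probe fails or @dev is released */
	return devm_pci_remap_iospace(dev, res, io_base);
}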
index d3172d5d3d352f3ff665f753718e63534b99230d..0fae816fba39b3cc09e36850ab101ae64e8c6b9f 100644 (file)
@@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
                                             0, 0xF8000000, 0,
                                             lower_32_bits(res->start),
                                             OB_PCIE_IO);
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index a1ebe9ed441f0aef256a55393bf296b44f08c9d6..bf5ece5d9291f18691b316a520db356910d09ffb 100644 (file)
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                dev_err(p->dev, "failed to get parent IRQ\n");
+               of_node_put(intc);
                return irq ?: -EINVAL;
        }
 
        p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
                                             &faraday_pci_irqdomain_ops, p);
+       of_node_put(intc);
        if (!p->irqdomain) {
                dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
                return -EINVAL;
@@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
                                dev_err(dev, "illegal IO mem size\n");
                                return -EINVAL;
                        }
-                       ret = pci_remap_iospace(io, io_base);
+                       ret = devm_pci_remap_iospace(dev, io, io_base);
                        if (ret) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         ret, io);
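
The of_node_put() additions here (and in the nwl and xilinx hunks further down) balance the reference taken when the interrupt-controller child node was looked up; the irq domain code takes its own reference on the node, so the caller must drop its lookup reference on both the success and the error path. A sketch of the balanced pattern:

#include <linux/of.h>
#include <linux/irqdomain.h>

static int init_intc_domain_sketch(struct device_node *parent,
				   const struct irq_domain_ops *ops)
{
	struct device_node *intc;
	struct irq_domain *d;

	intc = of_get_child_by_name(parent, "interrupt-controller");
	if (!intc)
		return -ENODEV;

	d = irq_domain_add_linear(intc, 4, ops, NULL);
	of_node_put(intc);	/* drop our lookup ref on every path */
	return d ? 0 : -ENOMEM;
}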
index 6cc5036ac83cface8941f2e58817c98a4eb80084..f6325f1a89e878ed69591b3ee8f96b0bec852a60 100644 (file)
@@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        struct pci_bus *pbus;
        struct pci_dev *pdev;
        struct cpumask *dest;
+       unsigned long flags;
        struct compose_comp_ctxt comp;
        struct tran_int_desc *int_desc;
        struct {
@@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                 * the channel callback directly when channel->target_cpu is
                 * the current CPU. When the higher level interrupt code
                 * calls us with interrupt enabled, let's add the
-                * local_bh_disable()/enable() to avoid race.
+                * local_irq_save()/restore() to avoid race:
+                * hv_pci_onchannelcallback() can also run in tasklet.
                 */
-               local_bh_disable();
+               local_irq_save(flags);
 
                if (hbus->hdev->channel->target_cpu == smp_processor_id())
                        hv_pci_onchannelcallback(hbus);
 
-               local_bh_enable();
+               local_irq_restore(flags);
 
                if (hpdev->state == hv_pcichild_ejecting) {
                        dev_err_once(&hbus->hdev->device,
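
The switch from local_bh_disable() to local_irq_save() above follows the updated comment: hv_pci_onchannelcallback() can also run from a tasklet, and disabling interrupts outright closes the race in every context this function may be reached from, including ones where re-enabling softirqs via local_bh_enable() would be problematic. In sketch form (the callback and its predicate are hypothetical):

#include <linux/irqflags.h>

static void poll_channel_sketch(void (*callback)(void *), void *ctx,
				bool targets_this_cpu)
{
	unsigned long flags;

	local_irq_save(flags);		/* keeps the tasklet off this CPU */
	if (targets_this_cpu)
		callback(ctx);		/* poll the ring directly */
	local_irq_restore(flags);
}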
index 68b8bfbdb867d0e53500e9a01a9b9ad041fc1c0d..d219404bad92b8394b902554b81f44d6c544869a 100644 (file)
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
                v3->io_bus_addr = io->start - win->offset;
                dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
                        io, &v3->io_bus_addr);
-               ret = pci_remap_iospace(io, io_base);
+               ret = devm_pci_remap_iospace(dev, io, io_base);
                if (ret) {
                        dev_warn(dev,
                                 "error %d: failed to map resource %pR\n",
index 994f32061b325d90d6a790452d61e5b3b0558154..f59ad2728c0b3266c8308e788d25e9c72f8dd96d 100644 (file)
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index d854d67e873cc1ee58d4a2ac7915cb6c72f024ac..ffda3e8b474268cebdbc807920b2f444f62102db 100644 (file)
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
                case IORESOURCE_IO:
                        xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
                                                res->start - window->offset);
-                       ret = pci_remap_iospace(res, io_base);
+                       ret = devm_pci_remap_iospace(dev, res, io_base);
                        if (ret < 0)
                                return ret;
                        break;
index 0baabe30858fd39d6d95310e2e5bfaf125684e7b..861dda69f3669970163d81bff102d849c2bd191f 100644 (file)
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
        if (err < 0)
                return err;
 
-       pci_remap_iospace(&pcie->pio, pcie->io.start);
+       devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
 
        return 0;
 }
index 4d6c20e47bed4f8c19ea4e44c0bfee78b40d28a6..cf0aa7cee5b0a80a5c578c9e94c27bbe410bebeb 100644 (file)
 #define CFG_WINDOW_TYPE        0
 #define IO_WINDOW_TYPE         1
 #define MEM_WINDOW_TYPE        2
-#define IB_WIN_SIZE            (256 * 1024 * 1024 * 1024)
+#define IB_WIN_SIZE            ((u64)256 * 1024 * 1024 * 1024)
 #define MAX_PIO_WINDOWS        8
 
 /* Parameters for the waiting for link up routine */
index 874d75c9ee4ac44513f584267272253d4cf84e7d..c8febb009454cdcfc0ed473f4cdb9540af0de59e 100644 (file)
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
        if (err)
                return err;
 
-       return phy_power_on(pcie->phy);
+       err = phy_power_on(pcie->phy);
+       if (err)
+               phy_exit(pcie->phy);
+
+       return err;
 }
 
 static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
        if (rcar_pcie_hw_init(pcie)) {
                dev_info(dev, "PCIe link down\n");
                err = -ENODEV;
-               goto err_clk_disable;
+               goto err_phy_shutdown;
        }
 
        data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
                        dev_err(dev,
                                "failed to enable MSI support: %d\n",
                                err);
-                       goto err_clk_disable;
+                       goto err_phy_shutdown;
                }
        }
 
@@ -1191,6 +1195,12 @@ err_msi_teardown:
        if (IS_ENABLED(CONFIG_PCI_MSI))
                rcar_pcie_teardown_msi(pcie);
 
+err_phy_shutdown:
+       if (pcie->phy) {
+               phy_power_off(pcie->phy);
+               phy_exit(pcie->phy);
+       }
+
 err_clk_disable:
        clk_disable_unprepare(pcie->bus_clk);
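
The rcar changes make PHY bring-up symmetric: if phy_power_on() fails, the earlier phy_init() is undone immediately, and later probe failures fall through the new err_phy_shutdown label that powers off and exits the PHY before the clock is released, mirroring init order in reverse. The bring-up half, sketched:

#include <linux/phy/phy.h>

static int phy_bringup_sketch(struct phy *phy)
{
	int err = phy_init(phy);

	if (err)
		return err;

	err = phy_power_on(phy);
	if (err)
		phy_exit(phy);	/* undo phy_init() on failure */

	return err;
}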
 
index 6a4bbb5b3de006f37f005bf0f198b28de16c7619..fb32840ce8e66ac75f7c164a402ac9a0e0ae094b 100644 (file)
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
                                                        PCI_NUM_INTX,
                                                        &legacy_domain_ops,
                                                        pcie);
-
+       of_node_put(legacy_intc_node);
        if (!pcie->legacy_irq_domain) {
                dev_err(dev, "failed to create IRQ domain\n");
                return -ENOMEM;
index b110a3a814e35e3bbd7839e68994cef56f19ed43..7b1389d8e2a5711383a4e8d8a6a424a4a33448f4 100644 (file)
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
        port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                 &intx_domain_ops,
                                                 port);
+       of_node_put(pcie_intc_node);
        if (!port->leg_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
                return -ENODEV;
index 523a8cab3bfba16613a03d13916aef3b223ef2cb..825fa24427a396a711b734ee03643038e1260185 100644 (file)
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
 }
 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
 
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+       struct config_group *group, *tmp;
+
+       if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+               return;
+
+       mutex_lock(&pci_epf_mutex);
+       list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+               pci_ep_cfs_remove_epf_group(group);
+       list_del(&driver->epf_group);
+       mutex_unlock(&pci_epf_mutex);
+}
+
 /**
  * pci_epf_unregister_driver() - unregister the PCI EPF driver
  * @driver: the PCI EPF driver that has to be unregistered
@@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
  */
 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
 {
-       struct config_group *group;
-
-       mutex_lock(&pci_epf_mutex);
-       list_for_each_entry(group, &driver->epf_group, group_entry)
-               pci_ep_cfs_remove_epf_group(group);
-       list_del(&driver->epf_group);
-       mutex_unlock(&pci_epf_mutex);
+       pci_epf_remove_cfs(driver);
        driver_unregister(&driver->driver);
 }
 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
 
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+       struct config_group *group;
+       const struct pci_epf_device_id *id;
+
+       if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+               return 0;
+
+       INIT_LIST_HEAD(&driver->epf_group);
+
+       id = driver->id_table;
+       while (id->name[0]) {
+               group = pci_ep_cfs_add_epf_group(id->name);
+               if (IS_ERR(group)) {
+                       pci_epf_remove_cfs(driver);
+                       return PTR_ERR(group);
+               }
+
+               mutex_lock(&pci_epf_mutex);
+               list_add_tail(&group->group_entry, &driver->epf_group);
+               mutex_unlock(&pci_epf_mutex);
+               id++;
+       }
+
+       return 0;
+}
+
 /**
  * __pci_epf_register_driver() - register a new PCI EPF driver
  * @driver: structure representing PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
                              struct module *owner)
 {
        int ret;
-       struct config_group *group;
-       const struct pci_epf_device_id *id;
 
        if (!driver->ops)
                return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
        if (ret)
                return ret;
 
-       INIT_LIST_HEAD(&driver->epf_group);
-
-       id = driver->id_table;
-       while (id->name[0]) {
-               group = pci_ep_cfs_add_epf_group(id->name);
-               mutex_lock(&pci_epf_mutex);
-               list_add_tail(&group->group_entry, &driver->epf_group);
-               mutex_unlock(&pci_epf_mutex);
-               id++;
-       }
+       pci_epf_add_cfs(driver);
 
        return 0;
 }
index 3979f89b250ad86ec622d4aa90d1450422324db8..5bd6c1573295696acf8387bd7cd953bf8f369546 100644 (file)
@@ -7,7 +7,6 @@
  * All rights reserved.
  *
  * Send feedback to <kristen.c.accardi@intel.com>
- *
  */
 
 #include <linux/module.h>
@@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
                return 0;
 
        /* If _OSC exists, we should not evaluate OSHP */
+
+       /*
+        * If there's no ACPI host bridge (i.e., ACPI support is compiled
+        * into the kernel but the hardware platform doesn't support ACPI),
+        * there's nothing to do here.
+        */
        host = pci_find_host_bridge(pdev->bus);
        root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+       if (!root)
+               return 0;
+
        if (root->osc_support_set)
                goto no_control;
 
index 3a17b290df5dd74010740762eb5dc00e163b32ec..ef0b1b6ba86f8fad2a570187252e7579e12ea129 100644 (file)
@@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Assume that newly added devices are powered on already. */
-               if (!dev->is_added)
+               if (!pci_dev_is_added(dev))
                        dev->current_state = PCI_D0;
        }
 
index d0d73dbbd5ca4123fce20254a0bf6495dcbe1d9e..0f04ae648cf14bcc590ce55b5b91a5635b02c500 100644 (file)
@@ -574,6 +574,22 @@ void pci_iov_release(struct pci_dev *dev)
                sriov_release(dev);
 }
 
+/**
+ * pci_iov_remove - clean up SR-IOV state after PF driver is detached
+ * @dev: the PCI device
+ */
+void pci_iov_remove(struct pci_dev *dev)
+{
+       struct pci_sriov *iov = dev->sriov;
+
+       if (!dev->is_physfn)
+               return;
+
+       iov->driver_max_VFs = iov->total_VFs;
+       if (iov->num_VFs)
+               pci_warn(dev, "driver left SR-IOV enabled after remove\n");
+}
+
 /**
  * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
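
(A hedged sketch of what the new pci_iov_remove() warning checks for: PF drivers are expected to disable SR-IOV themselves before or during their remove path. "demo_pf_remove" is illustrative only.)

#include <linux/pci.h>

static void demo_pf_remove(struct pci_dev *pdev)
{
	/* Leaving VFs enabled here would trigger the pci_warn() above. */
	if (pci_num_vf(pdev))
		pci_disable_sriov(pdev);
}
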
index d088c9147f10534ef767dba007e57e72337c2b0e..69a60d6ebd7365f52fcdf0c5e1e9ce870cd990f7 100644 (file)
@@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index 65113b6eed1473aa00daf59b1dfd57b8abd6baaf..89ee6a2b6eb838f426d6d9d3773f70769d1a489f 100644 (file)
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 
+       /*
+        * In some cases (e.g., Samsung 305V4A) leaving a bridge in suspend over
+        * system-wide suspend/resume confuses the platform firmware, so avoid
+        * doing that, unless the bridge has a driver that should take care of
+        * the PM handling.  According to Section 16.1.6 of ACPI 6.2, endpoint
+        * devices are expected to be in D3 before invoking the S3 entry path
+        * from the firmware, so they should not be affected by this issue.
+        */
+       if (pci_is_bridge(dev) && !dev->driver &&
+           acpi_target_system_state() != ACPI_STATE_S0)
+               return true;
+
        if (!adev || !acpi_device_power_manageable(adev))
                return false;
 
index c125d53033c69cafe42734c32935561547abbdba..6792292b5fc7055ab145703a3e718e50d0d4751c 100644 (file)
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
                }
                pcibios_free_irq(pci_dev);
                pci_dev->driver = NULL;
+               pci_iov_remove(pci_dev);
        }
 
        /* Undo the runtime PM settings in local_pci_probe() */
index 97acba712e4e7f7191df5fd7ae7800597dc94fd5..316496e99da9ba56b10d52ba2e0e0f4c9494890b 100644 (file)
@@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res)
 }
 EXPORT_SYMBOL(pci_unmap_iospace);
 
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+       struct resource **res = ptr;
+
+       pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace().  Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr)
+{
+       const struct resource **ptr;
+       int error;
+
+       ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       error = pci_remap_iospace(res, phys_addr);
+       if (error) {
+               devres_free(ptr);
+       } else {
+               *ptr = res;
+               devres_add(dev, ptr);
+       }
+
+       return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
 /**
  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
  * @dev: Generic device to remap IO address for
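
(A minimal sketch of the devres pattern devm_pci_remap_iospace() follows above: allocate a release record first, attempt the operation, then either free the record on failure or register it so the undo runs automatically on driver detach. The "demo" names are assumptions, not kernel API.)

#include <linux/device.h>

struct demo_res { int token; };

static void demo_release(struct device *dev, void *ptr)
{
	struct demo_res *res = ptr;

	/* Undo whatever devm_demo_acquire() did; stands in for pci_unmap_iospace(). */
	(void)res;
}

static int devm_demo_acquire(struct device *dev, int token)
{
	struct demo_res *res;

	res = devres_alloc(demo_release, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	/* Stand-in for pci_remap_iospace() succeeding. */
	res->token = token;
	devres_add(dev, res);	/* demo_release() fires on driver detach */
	return 0;
}
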
index c358e7a07f3faf2abbfff9de4b9e219ae6558e58..08817253c8a2fa4851330dac05d5eb9c3f8e37d4 100644 (file)
@@ -288,6 +288,7 @@ struct pci_sriov {
 
 /* pci_dev priv_flags */
 #define PCI_DEV_DISCONNECTED 0
+#define PCI_DEV_ADDED 1
 
 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
 {
@@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
        return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
 }
 
+static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+{
+       assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+}
+
+static inline bool pci_dev_is_added(const struct pci_dev *dev)
+{
+       return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
+}
+
 #ifdef CONFIG_PCI_ATS
 void pci_restore_ats_state(struct pci_dev *dev);
 #else
@@ -311,6 +322,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
+void pci_iov_remove(struct pci_dev *dev);
 void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
@@ -323,6 +335,9 @@ static inline int pci_iov_init(struct pci_dev *dev)
 }
 static inline void pci_iov_release(struct pci_dev *dev)
 
+{
+}
+static inline void pci_iov_remove(struct pci_dev *dev)
 {
 }
 static inline void pci_restore_iov_state(struct pci_dev *dev)
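
(Usage shape of the new priv_flags helpers for drivers/pci-internal code; a sketch of the call pattern, not the actual call sites in this series.)

#include "pci.h"	/* drivers/pci internal header providing the helpers */

static void demo_mark_added(struct pci_dev *dev)
{
	pci_dev_assign_added(dev, true);	/* was: dev->is_added = 1; */
}

static bool demo_needs_init(struct pci_dev *dev)
{
	return !pci_dev_is_added(dev);		/* was: !dev->is_added */
}
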
index f7ce0cb0b0b70a48902010c3cea0fb1707e5ead9..f02e334beb457da586857736fc8fc45530847fe3 100644 (file)
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 
        parent = udev->subordinate;
        pci_lock_rescan_remove();
+       pci_dev_get(dev);
        list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
                                         bus_list) {
                pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
                pci_info(dev, "Device recovery from fatal error failed\n");
        }
 
+       pci_dev_put(dev);
        pci_unlock_rescan_remove();
 }
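
(Sketch of the reference-counting fix above: pin a pci_dev across a section that can remove and free it. Illustrative only.)

#include <linux/pci.h>

static void demo_recover(struct pci_dev *dev)
{
	pci_dev_get(dev);	/* keep *dev valid while its bus is torn down */
	pci_lock_rescan_remove();
	/* ... stop/remove devices on the bus; dev may be among them ... */
	pci_unlock_rescan_remove();
	pci_dev_put(dev);	/* drop our reference; dev may now be freed */
}
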
 
index ac876e32de4b0fe4aaebf98209590720375daa50..611adcd9c16988efad379c83200d61f378a42881 100644 (file)
@@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
        dev = pci_scan_single_device(bus, devfn);
        if (!dev)
                return 0;
-       if (!dev->is_added)
+       if (!pci_dev_is_added(dev))
                nr++;
 
        for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
                dev = pci_scan_single_device(bus, devfn + fn);
                if (dev) {
-                       if (!dev->is_added)
+                       if (!pci_dev_is_added(dev))
                                nr++;
                        dev->multifunction = 1;
                }
index 6f072eae4f7a59a5f7678a6d258bf7fe458e4c43..5e3d0dced2b8d6bbf54185612bfe896f4d2941c0 100644 (file)
@@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev)
 {
        pci_pme_active(dev, false);
 
-       if (dev->is_added) {
+       if (pci_dev_is_added(dev)) {
                device_release_driver(&dev->dev);
                pci_proc_detach_device(dev);
                pci_remove_sysfs_dev_files(dev);
-               dev->is_added = 0;
+
+               pci_dev_assign_added(dev, false);
        }
 
        if (dev->bus->self)
index 6bdb1dad805f8198a6879aeab21a5ff9051b1510..0e31f1392a53ca042519bbbe4bc37dab3bfe87c0 100644 (file)
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_IOB_SLOW:
-               return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+               return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
index 1b7febc43da932628dc0223b8a37eac18320daa9..29d2c3b1913ac95bb9e49f705622e576643c01e1 100644 (file)
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
        void __iomem *ctrl = params->ctrl_regs;
 
+       USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+       /* 1 millisecond - for USB clocks to settle down */
+       usleep_range(1000, 2000);
+
        if (BRCM_ID(params->family_id) == 0x7366) {
                /*
                 * The PHY3_SOFT_RESETB bits default to the wrong state.
index 23705e1a002371327e059e48cccbf4828dabb33d..0075fb0bef8c55eab8d66804f9c6310003e8ec17 100644 (file)
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
        ddata = container_of(work, struct phy_mdm6600, status_work.work);
        dev = ddata->dev;
 
-       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
                                               ddata->status_gpios->desc,
                                               values);
        if (error)
                return;
 
-       for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+       for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
                val |= values[i] << i;
                dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
                        __func__, i, values[i], val);
index 76243caa08c630c064ebd674f566089bea1fc4ba..b5c880b50bb371f5fb5eeddcd0799cd9d7057289 100644 (file)
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
        unsigned long flags;
        unsigned int param;
        u32 reg, bit, width, arg;
-       int ret, i;
+       int ret = 0, i;
 
        info = &pctrl->soc->padinfo[pin];
 
index 35c17653c694767c8e13d1a8d02d7d91718d23b3..87618a4e90e451f2834214a337ce81e12de560fc 100644 (file)
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct nsp_pin_function *func;
        const struct nsp_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+           func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl->base0);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return -EINVAL;
        pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
                                              resource_size(res));
        if (!pinctrl->base1) {
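
(The bounds fix above in miniature: an index equal to the element count is already out of range, so the guard must use >=, not >. Plain illustrative C.)

#include <linux/errno.h>

static int demo_lookup(const int *table, unsigned int nelems, unsigned int idx)
{
	if (idx >= nelems)	/* idx == nelems would read one past the end */
		return -EINVAL;
	return table[idx];
}
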
index b601039d6c69a28d771eff622f0001d70bf84204..c4aa411f5935b7b0275c004a3924ffda3613630a 100644 (file)
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 }
 
 static int dt_to_map_one_config(struct pinctrl *p,
-                               struct pinctrl_dev *pctldev,
+                               struct pinctrl_dev *hog_pctldev,
                                const char *statename,
                                struct device_node *np_config)
 {
+       struct pinctrl_dev *pctldev = NULL;
        struct device_node *np_pctldev;
        const struct pinctrl_ops *ops;
        int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
                        return -EPROBE_DEFER;
                }
                /* If we're creating a hog we can use the passed pctldev */
-               if (pctldev && (np_pctldev == p->dev->of_node))
+               if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+                       pctldev = hog_pctldev;
                        break;
+               }
                pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
                if (pctldev)
                        break;
index ad6da1184c9f0b1117275df587f502d78abbe904..4c4740ffeb9ca0807f63d77271ad619407155c3b 100644 (file)
@@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = {
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        int value, err;
 
        err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 
 static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
 
        mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
 }
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        unsigned long eint_n;
 
+       if (!hw->eint)
+               return -ENOTSUPP;
+
        eint_n = offset;
 
        return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        unsigned long eint_n;
        u32 debounce;
 
-       if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+       if (!hw->eint ||
+           pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
                return -ENOTSUPP;
 
        debounce = pinconf_to_config_argument(config);
@@ -1504,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
        if (ret < 0)
                return ret;
 
-       ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
-                                    chip->ngpio);
-       if (ret < 0) {
-               gpiochip_remove(chip);
-               return ret;
+       /*
+        * Only kept for backward compatibility with old pinctrl nodes that
+        * lack the "gpio-ranges" property; otherwise, calling this directly
+        * from a DeviceTree-supported pinctrl driver is DEPRECATED.
+        * Please see Section 2.1 of
+        * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+        * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+        */
+       if (!of_find_property(np, "gpio-ranges", NULL)) {
+               ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+                                            chip->ngpio);
+               if (ret < 0) {
+                       gpiochip_remove(chip);
+                       return ret;
+               }
        }
 
        return 0;
@@ -1691,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
        mtk_desc.custom_conf_items = mtk_conf_items;
 #endif
 
-       hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
-       if (IS_ERR(hw->pctrl))
-               return PTR_ERR(hw->pctrl);
+       err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+                                            &hw->pctrl);
+       if (err)
+               return err;
 
        /* Setup groups descriptions per SoC types */
        err = mtk_build_groups(hw);
        if (err) {
                dev_err(&pdev->dev, "Failed to build groups\n");
-               return 0;
+               return err;
        }
 
        /* Setup functions descriptions per SoC types */
@@ -1709,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+       /*
+        * To allow pinctrl_claim_hogs() to work, pinctrl must not be
+        * enabled until all groups and functions have been added.
+        */
+       err = pinctrl_enable(hw->pctrl);
+       if (err)
                return err;
-       }
 
        err = mtk_build_eint(hw, pdev);
        if (err)
                dev_warn(&pdev->dev,
                         "Failed to add EINT, but pinctrl still can work\n");
 
+       /* The gpiochip must be built after pinctrl_enable() is done */
+       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+               return err;
+       }
+
        platform_set_drvdata(pdev, hw);
 
        return 0;
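
(Sketch of the pairing the gpiochip_get_data() fix above relies on: private data registered with [devm_]gpiochip_add_data() must be fetched with gpiochip_get_data(); the parent device's drvdata is not guaranteed to point at the same object. The "demo" names are assumptions.)

#include <linux/bitops.h>
#include <linux/gpio/driver.h>

struct demo_priv { u32 state; };

static int demo_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct demo_priv *priv = gpiochip_get_data(chip);	/* not dev_get_drvdata() */

	return !!(priv->state & BIT(offset));
}

static int demo_register(struct device *dev, struct gpio_chip *chip,
			 struct demo_priv *priv)
{
	/* The data passed here is what gpiochip_get_data() returns later. */
	return devm_gpiochip_add_data(dev, chip, priv);
}
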
index b3799695d8db8264ce917232cb3b1439793fe845..16ff56f93501794edb33231b770afa3dc5d55b73 100644 (file)
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Unable to get eint resource\n");
-               return -ENODEV;
-       }
-
        pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pctl->eint->base))
                return PTR_ERR(pctl->eint->base);
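
(Why the explicit !res check could be dropped above: devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR on a NULL or invalid resource. A hedged sketch:)

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* No NULL check needed; devm_ioremap_resource() rejects a bad res. */
	return devm_ioremap_resource(&pdev->dev, res);
}
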
index a1d7156d0a43ad49ac6312ab67b67a7312ad9e10..6a1b6058b9910269c60c448cbca3350bce399af6 100644 (file)
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
        }
 
index b3153c095199d3bed84d7b846432fa3e783c08f0..e5647dac0818d46353629a543733fe0af804210b 100644 (file)
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
 
        mux_bytes = pcs->width / BITS_PER_BYTE;
 
-       if (!pcs->saved_vals)
+       if (!pcs->saved_vals) {
                pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+               if (!pcs->saved_vals)
+                       return -ENOMEM;
+       }
 
        switch (pcs->width) {
        case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
        if (!pcs)
                return -EINVAL;
 
-       if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
-               pcs_save_context(pcs);
+       if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+               int ret;
+
+               ret = pcs_save_context(pcs);
+               if (ret < 0)
+                       return ret;
+       }
 
        return pinctrl_force_sleep(pcs->pctl);
 }
index b02caf31671186d97ea194e612b94adeb70335f4..eeb58b3bbc9a0cef4b47f65c4b8855f683e9f7e0 100644 (file)
 #include "core.h"
 #include "sh_pfc.h"
 
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
 #define CPU_ALL_PORT(fn, sfx)                                          \
-       PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_6(4,  fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+       PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_28(1, fn, sfx),                                         \
+       PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_6(4,  fn, sfx),                                         \
+       PORT_GP_15(5, fn, sfx)
 /*
  * F_() : just information
  * FM() : macro for FN_xxx / xxx_MARK
index 322de58eebaf57a0d15690a5c0805209093f4c5b..f66521c7f8462b3ec438f1b07beae4f0efd487cc 100644 (file)
@@ -30,7 +30,8 @@ int loongson3_cpu_temp(int cpu)
        case PRID_REV_LOONGSON3B_R2:
                reg = ((reg >> 8) & 0xff) - 100;
                break;
-       case PRID_REV_LOONGSON3A_R3:
+       case PRID_REV_LOONGSON3A_R3_0:
+       case PRID_REV_LOONGSON3A_R3_1:
                reg = (reg & 0xffff)*731/0x4000 - 273;
                break;
        }
index f1fa8612db406168f53db3d71a025255c0622af0..06978c14c83b23c5e35c4cb721863903a736486d 100644 (file)
@@ -2185,7 +2185,7 @@ static int __init dell_init(void)
                dell_fill_request(&buffer, token->location, 0, 0, 0);
                ret = dell_send_request(&buffer,
                                        CLASS_TOKEN_READ, SELECT_TOKEN_AC);
-               if (ret)
+               if (ret == 0)
                        max_intensity = buffer.output[3];
        }
 
index 767c485af59b2ee0583242b7dbf31581f76cb91a..01b0e2bb33190c78fb3818e34d5aebf4f60b2832 100644 (file)
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
@@ -221,7 +222,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                }
                pct = &sysoff->ts[0];
                for (i = 0; i < sysoff->n_samples; i++) {
-                       getnstimeofday64(&ts);
+                       ktime_get_real_ts64(&ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
@@ -230,7 +231,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        pct->nsec = ts.tv_nsec;
                        pct++;
                }
-               getnstimeofday64(&ts);
+               ktime_get_real_ts64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
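
(The y2038-safe pattern adopted above: read wall-clock time into a struct timespec64 via ktime_get_real_ts64() rather than the deprecated getnstimeofday64(). Sketch only.)

#include <linux/timekeeping.h>

static void demo_stamp(s64 *sec, long *nsec)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	*sec = ts.tv_sec;	/* time64_t seconds, safe past 2038 */
	*nsec = ts.tv_nsec;
}
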
index 1468a1642b4978f5048ccace2af1846baf9c51e9..e8652c148c5223d24c67089539b3bb4e861e42d5 100644 (file)
@@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
                pr_err("ioremap ptp registers failed\n");
                goto no_ioremap;
        }
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&qoriq_ptp->caps, &now);
 
        tmr_ctrl =
index 6d4012dd69221a1ebc4b72866824be8a95f29468..bac1eeb3d31204d9e99a93e1e682972b5f7177bb 100644 (file)
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                        return err;
 
                /* full-function RTCs won't have such missing fields */
-               if (rtc_valid_tm(&alarm->time) == 0)
+               if (rtc_valid_tm(&alarm->time) == 0) {
+                       rtc_add_offset(rtc, &alarm->time);
                        return 0;
+               }
 
                /* get the "after" timestamp, to detect wrapped fields */
                err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        if (err)
                return err;
 
-       rtc_subtract_offset(rtc, &alarm->time);
        scheduled = rtc_tm_to_time64(&alarm->time);
 
        /* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
+       rtc_subtract_offset(rtc, &alarm->time);
+
        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 
        mutex_unlock(&rtc->ops_lock);
 
-       rtc_add_offset(rtc, &alarm->time);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
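
(Sketch of the invariant the reordering above restores: the RTC core keeps alarm times in "real" time, converting to hardware time by subtracting the offset only immediately before handing the alarm to the driver, and adding it back right after reading. Assumes file-local context in drivers/rtc/interface.c, where rtc_subtract_offset() lives.)

static int demo_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	/* Convert from real time to hardware time at the last moment. */
	rtc_subtract_offset(rtc, &alarm->time);
	return rtc->ops->set_alarm(rtc->dev.parent, alarm);
}
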
index 097a4d4e2aba1e947ceaae3c3a7651a56927dac5..1925aaf09093713326553740db6db4358eb9fb51 100644 (file)
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
        }
 
        retval = rtc_register_device(mrst_rtc.rtc);
-       if (retval) {
-               retval = PTR_ERR(mrst_rtc.rtc);
+       if (retval)
                goto cleanup0;
-       }
 
        dev_dbg(dev, "initialised\n");
        return 0;
index 73cce3ecb97fefbccc66266a4fd29f08e453079e..a23e7d394a0ad1f1a74a241f1676accb9a57901a 100644 (file)
 
 #define DASD_DIAG_MOD          "dasd_diag_mod"
 
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -64,8 +73,8 @@ static int  dasd_alloc_queue(struct dasd_block *);
 static void dasd_setup_queue(struct dasd_block *);
 static void dasd_free_queue(struct dasd_block *);
 static int dasd_flush_block_queue(struct dasd_block *);
-static void dasd_device_tasklet(struct dasd_device *);
-static void dasd_block_tasklet(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
 static void do_kick_device(struct work_struct *);
 static void do_restore_device(struct work_struct *);
 static void do_reload_device(struct work_struct *);
@@ -116,8 +125,7 @@ struct dasd_device *dasd_alloc_device(void)
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
-       tasklet_init(&device->tasklet,
-                    (void (*)(unsigned long)) dasd_device_tasklet,
+       tasklet_init(&device->tasklet, dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        timer_setup(&device->timer, dasd_device_timeout, 0);
@@ -157,8 +165,7 @@ struct dasd_block *dasd_alloc_block(void)
        atomic_set(&block->open_count, -1);
 
        atomic_set(&block->tasklet_scheduled, 0);
-       tasklet_init(&block->tasklet,
-                    (void (*)(unsigned long)) dasd_block_tasklet,
+       tasklet_init(&block->tasklet, dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
@@ -1222,80 +1229,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
                device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
-{
-       struct dasd_ccw_req *cqr;
-
-       /* Sanity checks */
-       BUG_ON(datasize > PAGE_SIZE ||
-            (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
-       cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-       if (cqr == NULL)
-               return ERR_PTR(-ENOMEM);
-       cqr->cpaddr = NULL;
-       if (cplength > 0) {
-               cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-                                     GFP_ATOMIC | GFP_DMA);
-               if (cqr->cpaddr == NULL) {
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->data = NULL;
-       if (datasize > 0) {
-               cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-               if (cqr->data == NULL) {
-                       kfree(cqr->cpaddr);
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->magic =  magic;
-       set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-       dasd_get_device(device);
-       return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+                                         struct dasd_device *device,
+                                         struct dasd_ccw_req *cqr)
 {
        unsigned long flags;
-       struct dasd_ccw_req *cqr;
-       char *data;
-       int size;
+       char *data, *chunk;
+       int size = 0;
 
-       size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
+       if (!cqr)
+               size += (sizeof(*cqr) + 7L) & -8L;
+
        spin_lock_irqsave(&device->mem_lock, flags);
-       cqr = (struct dasd_ccw_req *)
-               dasd_alloc_chunk(&device->ccw_chunks, size);
+       data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
-       if (cqr == NULL)
+       if (!chunk)
                return ERR_PTR(-ENOMEM);
-       memset(cqr, 0, sizeof(struct dasd_ccw_req));
-       data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-       cqr->cpaddr = NULL;
+       if (!cqr) {
+               cqr = (void *) data;
+               data += (sizeof(*cqr) + 7L) & -8L;
+       }
+       memset(cqr, 0, sizeof(*cqr));
+       cqr->mem_chunk = chunk;
        if (cplength > 0) {
-               cqr->cpaddr = (struct ccw1 *) data;
-               data += cplength*sizeof(struct ccw1);
-               memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+               cqr->cpaddr = data;
+               data += cplength * sizeof(struct ccw1);
+               memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
-       cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
@@ -1307,33 +1271,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-       struct ccw1 *ccw;
-
-       /* Clear any idals used for the request. */
-       ccw = cqr->cpaddr;
-       do {
-               clear_normalized_cda(ccw);
-       } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-       kfree(cqr->cpaddr);
-       kfree(cqr->data);
-       kfree(cqr);
-       dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&device->mem_lock, flags);
-       dasd_free_chunk(&device->ccw_chunks, cqr);
+       dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
 }
@@ -1885,6 +1828,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
        }
 }
 
+static void __dasd_process_cqr(struct dasd_device *device,
+                              struct dasd_ccw_req *cqr)
+{
+       char errorstring[ERRORLENGTH];
+
+       switch (cqr->status) {
+       case DASD_CQR_SUCCESS:
+               cqr->status = DASD_CQR_DONE;
+               break;
+       case DASD_CQR_ERROR:
+               cqr->status = DASD_CQR_NEED_ERP;
+               break;
+       case DASD_CQR_CLEARED:
+               cqr->status = DASD_CQR_TERMINATED;
+               break;
+       default:
+               /* internal error 12 - wrong cqr status */
+               snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+               dev_err(&device->cdev->dev,
+                       "An error occurred in the DASD device driver, "
+                       "reason=%s\n", errorstring);
+               BUG();
+       }
+       if (cqr->callback)
+               cqr->callback(cqr, cqr->callback_data);
+}
+
 /*
  * the cqrs from the final queue are returned to the upper layer
  * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1865,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
-       void (*callback)(struct dasd_ccw_req *, void *data);
-       void *callback_data;
-       char errorstring[ERRORLENGTH];
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
-               callback = cqr->callback;
-               callback_data = cqr->callback_data;
-               if (block)
+               if (!block) {
+                       __dasd_process_cqr(device, cqr);
+               } else {
                        spin_lock_bh(&block->queue_lock);
-               switch (cqr->status) {
-               case DASD_CQR_SUCCESS:
-                       cqr->status = DASD_CQR_DONE;
-                       break;
-               case DASD_CQR_ERROR:
-                       cqr->status = DASD_CQR_NEED_ERP;
-                       break;
-               case DASD_CQR_CLEARED:
-                       cqr->status = DASD_CQR_TERMINATED;
-                       break;
-               default:
-                       /* internal error 12 - wrong cqr status*/
-                       snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
-                       dev_err(&device->cdev->dev,
-                               "An error occurred in the DASD device driver, "
-                               "reason=%s\n", errorstring);
-                       BUG();
-               }
-               if (cqr->callback != NULL)
-                       (callback)(cqr, callback_data);
-               if (block)
+                       __dasd_process_cqr(device, cqr);
                        spin_unlock_bh(&block->queue_lock);
+               }
        }
 }
 
@@ -2114,8 +2062,9 @@ EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
 /*
  * Acquire the device lock and process queues for the device.
  */
-static void dasd_device_tasklet(struct dasd_device *device)
+static void dasd_device_tasklet(unsigned long data)
 {
+       struct dasd_device *device = (struct dasd_device *) data;
        struct list_head final_queue;
 
        atomic_set (&device->tasklet_scheduled, 0);
@@ -2833,8 +2782,9 @@ static void __dasd_block_start_head(struct dasd_block *block)
  * block layer request queue, creates ccw requests, enqueues them on
  * a dasd_device and processes ccw requests that have been returned.
  */
-static void dasd_block_tasklet(struct dasd_block *block)
+static void dasd_block_tasklet(unsigned long data)
 {
+       struct dasd_block *block = (struct dasd_block *) data;
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
@@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;
-       *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
@@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        unsigned long flags;
        int rc = 0;
 
-       cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;
 
@@ -3174,10 +3123,11 @@ static int dasd_alloc_queue(struct dasd_block *block)
        int rc;
 
        block->tag_set.ops = &dasd_mq_ops;
-       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
-       block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
-       block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+       block->tag_set.nr_hw_queues = nr_hw_queues;
+       block->tag_set.queue_depth = queue_depth;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+       block->tag_set.numa_node = NUMA_NO_NODE;
 
        rc = blk_mq_alloc_tag_set(&block->tag_set);
        if (rc)
@@ -4038,7 +3988,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
        struct ccw1 *ccw;
        unsigned long *idaw;
 
-       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+                                  NULL);
 
        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed */
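
(The tasklet pattern adopted above: the callback takes an unsigned long and casts it back to the object pointer, instead of casting the function pointer itself, which is undefined behaviour. Sketch with assumed "demo" names.)

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct tasklet;
	/* ... */
};

static void demo_tasklet_fn(unsigned long data)
{
	struct demo_dev *dd = (struct demo_dev *)data;

	/* ... process dd ... */
	(void)dd;
}

static void demo_setup(struct demo_dev *dd)
{
	tasklet_init(&dd->tasklet, demo_tasklet_fn, (unsigned long)dd);
}
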
index 5e963fe0e38d4c2125c43ae801ca7e9b28d98d07..b9ce93e9df89295eb72132fcfc81d0257aaa1723 100644 (file)
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
        int rc;
        unsigned long flags;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
-       dasd_kfree_request(cqr, cqr->memdev);
+       dasd_sfree_request(cqr, cqr->memdev);
        return rc;
 }
 
@@ -708,7 +708,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
        struct ccw1 *ccw;
 
        cqr = lcu->rsu_cqr;
-       strncpy((char *) &cqr->magic, "ECKD", 4);
+       memcpy((char *) &cqr->magic, "ECKD", 4);
        ASCEBC((char *) &cqr->magic, 4);
        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_RSCK;
index b9ebb565ee2c70aa4672d539a4ac2d2236dc7fa8..fab35c6170cc81ab067b7573ae501e6781dcf292 100644 (file)
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
        if (!devmap) {
                /* This bus_id is new. */
                new->devindex = dasd_max_devindex++;
-               strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+               strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
                new->features = features;
                new->device = NULL;
                list_add(&new->list, &dasd_hashlists[hash]);
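
(Why strlcpy() replaces strncpy() above: strncpy() leaves the destination unterminated when the source fills the buffer, while strlcpy() always NUL-terminates, truncating if necessary. Sketch only.)

#include <linux/string.h>

static void demo_copy_busid(char *dst, const char *src, size_t size)
{
	strlcpy(dst, src, size);	/* dst is always NUL-terminated */
}
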
index 131f1989f6f3dff0345250c71943f5ac338af19c..e1fe02477ea8fca951232dabe7f89754c8f287ff 100644 (file)
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Build the request */
        datasize = sizeof(struct dasd_diag_req) +
                count*sizeof(struct dasd_diag_bio);
-       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
index be208e7adcb46087e7fb2436fadf8a737d7c472e..4e7b55a14b1a46d40920fde48460fa2400c46f84 100644 (file)
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
        }
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
                                   0, /* use rcd_buf as data ara */
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_features)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
                                "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
                                  sizeof(struct dasd_psf_ssc_data),
-                                 device);
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
        struct dasd_eckd_private *private = device->private;
        int i;
 
+       if (!private)
+               return;
+
        dasd_alias_disconnect_device_from_lcu(device);
        private->ned = NULL;
        private->sneq = NULL;
@@ -1815,7 +1818,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 
        cplength = 8;
        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -2034,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
 
 static int dasd_eckd_online_to_ready(struct dasd_device *device)
 {
-       cancel_work_sync(&device->reload_device);
-       cancel_work_sync(&device->kick_validate);
+       if (cancel_work_sync(&device->reload_device))
+               dasd_put_device(device);
+       if (cancel_work_sync(&device->kick_validate))
+               dasd_put_device(device);
+
        return 0;
 };
 
@@ -2092,7 +2099,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
         */
        itcw_size = itcw_calc_size(0, count, 0);
 
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2186,7 +2194,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
        cplength += count;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                 startdev);
+                                  startdev, NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2332,7 +2340,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        }
        /* Allocate the format ccw request. */
        fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, NULL);
        if (IS_ERR(fcp))
                return fcp;
 
@@ -3103,7 +3111,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        }
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3262,7 +3270,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3533,7 +3541,7 @@ static int prepare_itcw(struct itcw *itcw,
 
        dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
                     &pfxdata, sizeof(pfxdata), total_data_size);
-       return PTR_RET(dcw);
+       return PTR_ERR_OR_ZERO(dcw);
 }
 
 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
@@ -3595,7 +3603,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 
        /* Allocate the ccw request. */
        itcw_size = itcw_calc_size(0, ctidaw, 0);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -3862,7 +3871,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -4102,7 +4111,7 @@ dasd_eckd_release(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4157,7 +4166,7 @@ dasd_eckd_reserve(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4211,7 +4220,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4271,7 +4280,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 
        useglobal = 0;
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-                                  sizeof(struct dasd_snid_data), device);
+                                  sizeof(struct dasd_snid_data), device,
+                                  NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4331,7 +4341,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_perf_stats_t)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "Could not allocate initialization request");
@@ -4477,7 +4487,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
        psf1 = psf_data[1];
 
        /* setup CCWs for PSF + RSSD */
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                        "Could not allocate initialization request");
@@ -5037,7 +5047,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_messages)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5126,7 +5136,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   sizeof(struct dasd_psf_prssd_data) + 1,
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5284,8 +5294,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
        int rc;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
-                                 sizeof(struct dasd_psf_cuir_response),
-                                 device);
+                                  sizeof(struct dasd_psf_cuir_response),
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
index 0af8c5295b650b1132e5946b123b558a08e91ccc..93bb09da7fdc4e6a2372a0d871826252498019fe 100644 (file)
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
        ktime_get_real_ts64(&ts);
        header.tv_sec = ts.tv_sec;
        header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
-       strncpy(header.busid, dev_name(&device->cdev->dev),
+       strlcpy(header.busid, dev_name(&device->cdev->dev),
                DASD_EER_BUSID_SIZE);
 
        spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
        ktime_get_real_ts64(&ts);
        header.tv_sec = ts.tv_sec;
        header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
-       strncpy(header.busid, dev_name(&device->cdev->dev),
+       strlcpy(header.busid, dev_name(&device->cdev->dev),
                DASD_EER_BUSID_SIZE);
 
        spin_lock_irqsave(&bufferlock, flags);
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
        if (rc)
                goto out;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-                                  SNSS_DATA_SIZE, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+                                  SNSS_DATA_SIZE, device, NULL);
        if (IS_ERR(cqr)) {
                rc = -ENOMEM;
                cqr = NULL;
@@ -505,7 +505,7 @@ out:
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        if (cqr)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 
        return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
index a6b132f7e869eb4eb804b3fa8407cd064c92b699..56007a3e7f110358e27ad74563f24e428cbae473 100644 (file)
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
        datasize = sizeof(struct DE_fba_data) +
                nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
                datasize += (count - 1)*sizeof(struct LO_fba_data);
        }
        /* Allocate the ccw request. */
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
index 96709b1a7bf8d8af0f4e0db7748cd5ac8e5a8650..de6b96036aa40fb104e84c6c9e58ba89beebb232 100644 (file)
@@ -158,40 +158,33 @@ do { \
 
 struct dasd_ccw_req {
        unsigned int magic;             /* Eye catcher */
+       int intrc;                      /* internal error, e.g. from start_IO */
        struct list_head devlist;       /* for dasd_device request queue */
        struct list_head blocklist;     /* for dasd_block request queue */
-
-       /* Where to execute what... */
        struct dasd_block *block;       /* the originating block device */
        struct dasd_device *memdev;     /* the device used to allocate this */
        struct dasd_device *startdev;   /* device the request is started on */
        struct dasd_device *basedev;    /* base device if no block->base */
        void *cpaddr;                   /* address of ccw or tcw */
+       short retries;                  /* A retry counter */
        unsigned char cpmode;           /* 0 = cmd mode, 1 = itcw */
        char status;                    /* status of this request */
-       short retries;                  /* A retry counter */
+       char lpm;                       /* logical path mask */
        unsigned long flags;            /* flags of this request */
        struct dasd_queue *dq;
-
-       /* ... and how */
        unsigned long starttime;        /* jiffies time of request start */
        unsigned long expires;          /* expiration period in jiffies */
-       char lpm;                       /* logical path mask */
        void *data;                     /* pointer to data area */
-
-       /* these are important for recovering erroneous requests          */
-       int intrc;                      /* internal error, e.g. from start_IO */
        struct irb irb;                 /* device status in case of an error */
        struct dasd_ccw_req *refers;    /* ERP-chain queueing. */
        void *function;                 /* originating ERP action */
+       void *mem_chunk;
 
-       /* these are for statistics only */
        unsigned long buildclk;         /* TOD-clock of request generation */
        unsigned long startclk;         /* TOD-clock of request start */
        unsigned long stopclk;          /* TOD-clock of request interrupt */
        unsigned long endclk;           /* TOD-clock of request termination */
 
-        /* Callback that is called after reaching final status. */
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
 };
@@ -235,14 +228,6 @@ struct dasd_ccw_req {
 #define DASD_CQR_SUPPRESS_IL   6       /* Suppress 'Incorrect Length' error */
 #define DASD_CQR_SUPPRESS_CR   7       /* Suppress 'Command Reject' error */
 
-/*
- * There is no reliable way to determine the number of available CPUs on
- * LPAR but there is no big performance difference between 1 and the
- * maximum CPU number.
- * 64 is a good trade off performance wise.
- */
-#define DASD_NR_HW_QUEUES 64
-#define DASD_MAX_LCU_DEV 256
 #define DASD_REQ_PER_DEV 4
 
 /* Signature for error recovery functions. */
@@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-       return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
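
Note: on the header side, dasd_kmalloc_request(), dasd_kfree_request() and the dasd_kmalloc_set_cda() stub are gone; dasd_smalloc_request() is the only allocator left and gains a fifth parameter, the caller-provided storage for the cqr (in practice the blk-mq PDU, as above), with the new mem_chunk member presumably tracking pool memory that is still allocated internally. The field shuffling in struct dasd_ccw_req (intrc moved to the top, retries and lpm placed next to the other small members) looks like plain hole-closing; the old grouping comments had to go with it. Call sites now read:

    cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
                               blk_mq_rq_to_pdu(req));
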
 
index b1fcb76dd272e00196f016942fc6aed8b0e10171..98f66b7b6794512f6a761bfd2cd13f9a1faacfbf 100644 (file)
@@ -455,6 +455,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
        bdev->tag_set.nr_hw_queues = nr_requests;
        bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
        bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+       bdev->tag_set.numa_node = NUMA_NO_NODE;
 
        ret = blk_mq_alloc_tag_set(&bdev->tag_set);
        if (ret)
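
Note: in a zeroed containing structure, an uninitialized blk_mq_tag_set has numa_node == 0, which is a valid node id, so all tag-set allocations would be pinned to node 0. Setting NUMA_NO_NODE explicitly lets the allocator choose. Sketch of a fully initialized tag set (my_mq_ops is a placeholder):

    struct blk_mq_tag_set set = {
            .ops            = &my_mq_ops,
            .nr_hw_queues   = 1,
            .queue_depth    = 64,
            .numa_node      = NUMA_NO_NODE, /* don't force node 0 */
            .flags          = BLK_MQ_F_SHOULD_MERGE,
    };
    int ret = blk_mq_alloc_tag_set(&set);
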
index 0a4c13e1e76eae984a66758801b5ac75d471879b..c6ab34f94b1b54c96d704abf3f19e6aa16eaca78 100644 (file)
@@ -12,11 +12,6 @@ GCOV_PROFILE_sclp_early_core.o               := n
 KCOV_INSTRUMENT_sclp_early_core.o      := n
 UBSAN_SANITIZE_sclp_early_core.o       := n
 
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp_early_core.o        += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o               += -march=z900
-endif
-
 CFLAGS_sclp_early_core.o               += -D__NO_FORTIFY
 
 CFLAGS_REMOVE_sclp_early_core.o        += $(CC_FLAGS_EXPOLINE)
index 79eb60958015a5348818dfc94c23b2aa3585f0c1..bbb3001b0961f339535cb4f52424d2f008142ca2 100644 (file)
@@ -334,37 +334,41 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
              int cmd, int perm)
 {
        struct kbentry tmp;
+       unsigned long kb_index, kb_table;
        ushort *key_map, val, ov;
 
        if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
                return -EFAULT;
+       kb_index = (unsigned long) tmp.kb_index;
 #if NR_KEYS < 256
-       if (tmp.kb_index >= NR_KEYS)
+       if (kb_index >= NR_KEYS)
                return -EINVAL;
 #endif
+       kb_table = (unsigned long) tmp.kb_table;
 #if MAX_NR_KEYMAPS < 256
-       if (tmp.kb_table >= MAX_NR_KEYMAPS)
+       if (kb_table >= MAX_NR_KEYMAPS)
                return -EINVAL; 
+       kb_table = array_index_nospec(kb_table, MAX_NR_KEYMAPS);
 #endif
 
        switch (cmd) {
        case KDGKBENT:
-               key_map = kbd->key_maps[tmp.kb_table];
+               key_map = kbd->key_maps[kb_table];
                if (key_map) {
-                   val = U(key_map[tmp.kb_index]);
+                   val = U(key_map[kb_index]);
                    if (KTYP(val) >= KBD_NR_TYPES)
                        val = K_HOLE;
                } else
-                   val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+                   val = (kb_index ? K_HOLE : K_NOSUCHMAP);
                return put_user(val, &user_kbe->kb_value);
        case KDSKBENT:
                if (!perm)
                        return -EPERM;
-               if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+               if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
                        /* disallocate map */
-                       key_map = kbd->key_maps[tmp.kb_table];
+                       key_map = kbd->key_maps[kb_table];
                        if (key_map) {
-                           kbd->key_maps[tmp.kb_table] = NULL;
+                           kbd->key_maps[kb_table] = NULL;
                            kfree(key_map);
                        }
                        break;
@@ -375,18 +379,18 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
                if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
                        return -EINVAL;
 
-               if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+               if (!(key_map = kbd->key_maps[kb_table])) {
                        int j;
 
                        key_map = kmalloc(sizeof(plain_map),
                                                     GFP_KERNEL);
                        if (!key_map)
                                return -ENOMEM;
-                       kbd->key_maps[tmp.kb_table] = key_map;
+                       kbd->key_maps[kb_table] = key_map;
                        for (j = 0; j < NR_KEYS; j++)
                                key_map[j] = U(K_HOLE);
                }
-               ov = U(key_map[tmp.kb_index]);
+               ov = U(key_map[kb_index]);
                if (tmp.kb_value == ov)
                        break;  /* nothing to do */
                /*
@@ -395,7 +399,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
                if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
                    !capable(CAP_SYS_ADMIN))
                        return -EPERM;
-               key_map[tmp.kb_index] = U(tmp.kb_value);
+               key_map[kb_index] = U(tmp.kb_value);
                break;
        }
        return 0;
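
Note: this is the usual Spectre-v1 hardening pattern: copy the user-supplied index once, bounds-check it, then clamp it with array_index_nospec() so the CPU cannot use an out-of-range value under speculation. The generic shape:

    #include <linux/nospec.h>

    if (idx >= ARRAY_SIZE(table))
            return -EINVAL;
    idx = array_index_nospec(idx, ARRAY_SIZE(table)); /* clamped even speculatively */
    val = table[idx];

kb_index gets the same copy-and-check treatment but no clamp; since struct kbentry carries it as an unsigned char, it cannot exceed the array once NR_KEYS >= 256.
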
index 76c158c41510374ac4b814aca55587193c8b8fe7..4f1a69c9d81d621e159ee8a103c4ded636d596cb 100644 (file)
@@ -61,7 +61,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
        struct appldata_product_id id;
        int rc;
 
-       strncpy(id.prod_nr, "LNXAPPL", 7);
+       memcpy(id.prod_nr, "LNXAPPL", 7);
        id.prod_fn = myhdr->applid;
        id.record_nr = myhdr->record_num;
        id.version_nr = myhdr->version;
index ee6f3b563728319ba5c3d4964f05843453e3ce99..e69b12a406362f9d0415f1d880e1f506eb3b7383 100644 (file)
@@ -64,42 +64,18 @@ static struct notifier_block call_home_panic_nb = {
        .priority = INT_MAX,
 };
 
-static int proc_handler_callhome(struct ctl_table *ctl, int write,
-                                void __user *buffer, size_t *count,
-                                loff_t *ppos)
-{
-       unsigned long val;
-       int len, rc;
-       char buf[3];
-
-       if (!*count || (*ppos && !write)) {
-               *count = 0;
-               return 0;
-       }
-       if (!write) {
-               len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
-               rc = copy_to_user(buffer, buf, sizeof(buf));
-               if (rc != 0)
-                       return -EFAULT;
-       } else {
-               len = *count;
-               rc = kstrtoul_from_user(buffer, len, 0, &val);
-               if (rc)
-                       return rc;
-               if (val != 0 && val != 1)
-                       return -EINVAL;
-               callhome_enabled = val;
-       }
-       *count = len;
-       *ppos += len;
-       return 0;
-}
+static int zero;
+static int one = 1;
 
 static struct ctl_table callhome_table[] = {
        {
                .procname       = "callhome",
+               .data           = &callhome_enabled,
+               .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_handler_callhome,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
        },
        {}
 };
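
Note: the hand-rolled proc handler, which among other quirks copied sizeof(buf) bytes to userspace regardless of the formatted length, is replaced by the stock proc_dointvec_minmax bounded to [0, 1] via extra1/extra2. The idiom, as used here:

    static int enabled;                     /* the knob itself */
    static int zero;
    static int one = 1;

    static struct ctl_table my_table[] = {
            {
                    .procname       = "enabled",
                    .data           = &enabled,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &zero,        /* minimum */
                    .extra2         = &one,         /* maximum */
            },
            {}
    };
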
index 37e65a05517f50606f73db539e0871e76452d142..cdcde18e72203eeeb07e975adb5aaa8ca0258199 100644 (file)
@@ -113,16 +113,16 @@ static int crypt_enabled(struct tape_device *device)
 static void ext_to_int_kekl(struct tape390_kekl *in,
                            struct tape3592_kekl *out)
 {
-       int i;
+       int len;
 
        memset(out, 0, sizeof(*out));
        if (in->type == TAPE390_KEKL_TYPE_HASH)
                out->flags |= 0x40;
        if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
                out->flags |= 0x80;
-       strncpy(out->label, in->label, 64);
-       for (i = strlen(in->label); i < sizeof(out->label); i++)
-               out->label[i] = ' ';
+       len = min(sizeof(out->label), strlen(in->label));
+       memcpy(out->label, in->label, len);
+       memset(out->label + len, ' ', sizeof(out->label) - len);
        ASCEBC(out->label, sizeof(out->label));
 }
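
Note: the old code strncpy'd a fixed 64 bytes and then padded from strlen(in->label) onward, so a label longer than the output field skipped the padding loop entirely (and strlen assumes a terminated source). The rewrite is the standard idiom for a fixed-width, blank-padded field (dst an array, so sizeof gives the field width):

    len = min(sizeof(dst), strlen(src));    /* never copy past dst */
    memcpy(dst, src, len);
    memset(dst + len, ' ', sizeof(dst) - len);
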
 
index a07102472ce97eba06a526dcb56d5690b9be2fd4..b58df0dd0039c0d3a7f97fade839d0bd600bfd2c 100644 (file)
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
        if (!tcd)
                return ERR_PTR(-ENOMEM);
 
-       strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+       strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
        for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
                *s = '!';
-       strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+       strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
        for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
                *s = '!';
 
@@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev(
        tcd->class_device = device_create(tape_class, device,
                                          tcd->char_device->dev, NULL,
                                          "%s", tcd->device_name);
-       rc = PTR_RET(tcd->class_device);
+       rc = PTR_ERR_OR_ZERO(tcd->class_device);
        if (rc)
                goto fail_with_cdev;
        rc = sysfs_create_link(
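
Note: two mechanical modernizations here: strlcpy() always NUL-terminates the fixed-size name buffers (strncpy does not when the source fills the buffer), and PTR_ERR_OR_ZERO() is the current spelling of the deprecated PTR_RET():

    rc = PTR_ERR_OR_ZERO(tcd->class_device); /* -errno if IS_ERR(), else 0 */
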
index a070ef0efe65d0079cc10245b1ed8b79b8e8fba9..f230516abb96d31b4eabb2689a7230905857c48f 100644 (file)
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
index afbdee74147dd01553ce136bd2a1f3e5f349ad37..51038ec309c12ef3f3eae351d332ed7a13694eda 100644 (file)
@@ -471,14 +471,17 @@ int chp_new(struct chp_id chpid)
 {
        struct channel_subsystem *css = css_by_id(chpid.cssid);
        struct channel_path *chp;
-       int ret;
+       int ret = 0;
 
+       mutex_lock(&css->mutex);
        if (chp_is_registered(chpid))
-               return 0;
-       chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
-       if (!chp)
-               return -ENOMEM;
+               goto out;
 
+       chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+       if (!chp) {
+               ret = -ENOMEM;
+               goto out;
+       }
        /* fill in status, etc. */
        chp->chpid = chpid;
        chp->state = 1;
@@ -505,21 +508,20 @@ int chp_new(struct chp_id chpid)
                put_device(&chp->dev);
                goto out;
        }
-       mutex_lock(&css->mutex);
+
        if (css->cm_enabled) {
                ret = chp_add_cmg_attr(chp);
                if (ret) {
                        device_unregister(&chp->dev);
-                       mutex_unlock(&css->mutex);
                        goto out;
                }
        }
        css->chps[chpid.id] = chp;
-       mutex_unlock(&css->mutex);
        goto out;
 out_free:
        kfree(chp);
 out:
+       mutex_unlock(&css->mutex);
        return ret;
 }
 
@@ -585,8 +587,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
        switch (crw0->erc) {
        case CRW_ERC_IPARM: /* Path has come. */
        case CRW_ERC_INIT:
-               if (!chp_is_registered(chpid))
-                       chp_new(chpid);
+               chp_new(chpid);
                chsc_chp_online(chpid);
                break;
        case CRW_ERC_PERRI: /* Path has gone. */
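
Note: chp_new() now holds css->mutex across the whole check-allocate-register sequence and simply returns 0 when the chpid is already registered, making it idempotent; that is why the racy chp_is_registered() pre-checks are dropped here and in css.c further down. The resulting shape, reduced:

    mutex_lock(&css->mutex);
    if (chp_is_registered(chpid))
            goto out;       /* ret stays 0: already present, not an error */
    /* ... allocate, device_register(), chp_add_cmg_attr(), all under the mutex ... */
    out:
            mutex_unlock(&css->mutex);
            return ret;
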
index 9029804dcd225b85d5609c8af6c6ebc95522f12e..a0baee25134c0cd0b6a6e35df694ae763a4b0a21 100644 (file)
@@ -91,7 +91,7 @@ struct chsc_ssd_area {
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
-} __attribute__ ((packed));
+} __packed __aligned(PAGE_SIZE);
 
 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 {
@@ -319,7 +319,7 @@ struct chsc_sei {
                struct chsc_sei_nt2_area nt2_area;
                u8 nt_area[PAGE_SIZE - 24];
        } u;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 /*
  * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
@@ -841,7 +841,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
-       } __attribute__ ((packed)) *secm_area;
+       } *secm_area;
        unsigned long flags;
        int ret, ccode;
 
@@ -1014,7 +1014,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
-       } __attribute__ ((packed)) *scmc_area;
+       } *scmc_area;
 
        chp->shared = -1;
        chp->cmg = -1;
@@ -1142,7 +1142,7 @@ int __init chsc_get_cssid(int idx)
                        u8 cssid;
                        u32 : 24;
                } list[0];
-       } __packed *sdcal_area;
+       } *sdcal_area;
        int ret;
 
        spin_lock_irq(&chsc_page_lock);
@@ -1192,7 +1192,7 @@ chsc_determine_css_characteristics(void)
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[508];
-       } __attribute__ ((packed)) *scsc_area;
+       } *scsc_area;
 
        spin_lock_irqsave(&chsc_page_lock, flags);
        memset(chsc_page, 0, PAGE_SIZE);
@@ -1236,7 +1236,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
                unsigned int rsvd3[3];
                u64 clock_delta;
                unsigned int rsvd4[2];
-       } __attribute__ ((packed)) *rr;
+       } *rr;
        int rc;
 
        memset(page, 0, PAGE_SIZE);
@@ -1261,7 +1261,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
                unsigned int rsvd0[3];
                struct chsc_header response;
                char data[];
-       } __attribute__ ((packed)) *rr;
+       } *rr;
        int rc;
 
        memset(page, 0, PAGE_SIZE);
@@ -1284,7 +1284,7 @@ int chsc_siosl(struct subchannel_id schid)
                u32 word3;
                struct chsc_header response;
                u32 word[11];
-       } __attribute__ ((packed)) *siosl_area;
+       } *siosl_area;
        unsigned long flags;
        int ccode;
        int rc;
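
Note: the CHSC request/response areas are built in a dedicated scratch page (chsc_page, guarded by chsc_page_lock) and the channel subsystem expects them page-aligned; __aligned(PAGE_SIZE) now encodes that in the types. The __packed on the small local response structs was pointless, every member being naturally aligned anyway, and dropping it lets the compiler generate saner accesses. The usage pattern stays as in the surrounding code:

    spin_lock_irqsave(&chsc_page_lock, flags);
    memset(chsc_page, 0, PAGE_SIZE);
    scsc_area = chsc_page;  /* page-aligned, so the annotation holds */
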
index 5c9f0dd33f4ee38634d6cb3b0383d364b2c1f310..78aba8d94eec33eae2c0594dde4a79bf12c8ba82 100644 (file)
 #define NR_MEASUREMENT_CHARS 5
 struct cmg_chars {
        u32 values[NR_MEASUREMENT_CHARS];
-} __attribute__ ((packed));
+};
 
 #define NR_MEASUREMENT_ENTRIES 8
 struct cmg_entry {
        u32 values[NR_MEASUREMENT_ENTRIES];
-} __attribute__ ((packed));
+};
 
 struct channel_path_desc_fmt1 {
        u8 flags;
@@ -38,7 +38,7 @@ struct channel_path_desc_fmt1 {
        u8 s:1;
        u8 f:1;
        u32 zeros[2];
-} __attribute__ ((packed));
+};
 
 struct channel_path_desc_fmt3 {
        struct channel_path_desc_fmt1 fmt1_desc;
@@ -59,7 +59,7 @@ struct css_chsc_char {
        u32:7;
        u32 pnso:1; /* bit 116 */
        u32:11;
-}__attribute__((packed));
+} __packed;
 
 extern struct css_chsc_char css_chsc_characteristics;
 
@@ -82,7 +82,7 @@ struct chsc_ssqd_area {
        struct chsc_header response;
        u32:32;
        struct qdio_ssqd_desc qdio_ssqd;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 struct chsc_scssc_area {
        struct chsc_header request;
@@ -102,7 +102,7 @@ struct chsc_scssc_area {
        u32 reserved[1004];
        struct chsc_header response;
        u32:32;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 struct chsc_scpd {
        struct chsc_header request;
@@ -120,7 +120,7 @@ struct chsc_scpd {
        struct chsc_header response;
        u32:32;
        u8 data[0];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 struct chsc_sda_area {
        struct chsc_header request;
@@ -199,7 +199,7 @@ struct chsc_scm_info {
        u32 reserved2[10];
        u64 restok;
        struct sale scmal[248];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
 
@@ -243,7 +243,7 @@ struct chsc_pnso_area {
                struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
                struct qdio_brinfo_entry_l2      l2[0];
        } entries;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
 
 int chsc_pnso_brinfo(struct subchannel_id schid,
                struct chsc_pnso_area *brinfo_area,
index 5130d7c67239b337c71eeca67762f7061ee06abb..de744ca158fdf7044b6a07035546fb98fa5d80b9 100644 (file)
@@ -526,76 +526,6 @@ int cio_disable_subchannel(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(cio_disable_subchannel);
 
-static int cio_check_devno_blacklisted(struct subchannel *sch)
-{
-       if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
-               /*
-                * This device must not be known to Linux. So we simply
-                * say that there is no device and return ENODEV.
-                */
-               CIO_MSG_EVENT(6, "Blacklisted device detected "
-                             "at devno %04X, subchannel set %x\n",
-                             sch->schib.pmcw.dev, sch->schid.ssid);
-               return -ENODEV;
-       }
-       return 0;
-}
-
-/**
- * cio_validate_subchannel - basic validation of subchannel
- * @sch: subchannel structure to be filled out
- * @schid: subchannel id
- *
- * Find out subchannel type and initialize struct subchannel.
- * Return codes:
- *   0 on success
- *   -ENXIO for non-defined subchannels
- *   -ENODEV for invalid subchannels or blacklisted devices
- *   -EIO for subchannels in an invalid subchannel set
- */
-int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
-{
-       char dbf_txt[15];
-       int ccode;
-       int err;
-
-       sprintf(dbf_txt, "valsch%x", schid.sch_no);
-       CIO_TRACE_EVENT(4, dbf_txt);
-
-       /*
-        * The first subchannel that is not-operational (ccode==3)
-        * indicates that there aren't any more devices available.
-        * If stsch gets an exception, it means the current subchannel set
-        * is not valid.
-        */
-       ccode = stsch(schid, &sch->schib);
-       if (ccode) {
-               err = (ccode == 3) ? -ENXIO : ccode;
-               goto out;
-       }
-       sch->st = sch->schib.pmcw.st;
-       sch->schid = schid;
-
-       switch (sch->st) {
-       case SUBCHANNEL_TYPE_IO:
-       case SUBCHANNEL_TYPE_MSG:
-               if (!css_sch_is_valid(&sch->schib))
-                       err = -ENODEV;
-               else
-                       err = cio_check_devno_blacklisted(sch);
-               break;
-       default:
-               err = 0;
-       }
-       if (err)
-               goto out;
-
-       CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
-                     sch->schid.ssid, sch->schid.sch_no, sch->st);
-out:
-       return err;
-}
-
 /*
  * do_cio_interrupt() handles all normal I/O device IRQ's
  */
@@ -719,6 +649,7 @@ struct subchannel *cio_probe_console(void)
 {
        struct subchannel_id schid;
        struct subchannel *sch;
+       struct schib schib;
        int sch_no, ret;
 
        sch_no = cio_get_console_sch_no();
@@ -728,7 +659,11 @@ struct subchannel *cio_probe_console(void)
        }
        init_subchannel_id(&schid);
        schid.sch_no = sch_no;
-       sch = css_alloc_subchannel(schid);
+       ret = stsch(schid, &schib);
+       if (ret)
+               return ERR_PTR(-ENODEV);
+
+       sch = css_alloc_subchannel(schid, &schib);
        if (IS_ERR(sch))
                return sch;
 
index 94cd813bdcfef8d2a281eceef1d9fae79be97d08..9811fd8a0c7310b119a1e9c84da2b583ef763485 100644 (file)
@@ -119,7 +119,6 @@ DECLARE_PER_CPU(struct irb, cio_irb);
 
 #define to_subchannel(n) container_of(n, struct subchannel, dev)
 
-extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
 extern int cio_enable_subchannel(struct subchannel *, u32);
 extern int cio_disable_subchannel (struct subchannel *);
 extern int cio_cancel (struct subchannel *);
index 9263a0fb385845588ceb7a80a5f6a2edc9449556..aea50292264629e63f131dfd2dfd87d3ff108799 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "css.h"
 #include "cio.h"
+#include "blacklist.h"
 #include "cio_debug.h"
 #include "ioasm.h"
 #include "chsc.h"
@@ -168,18 +169,53 @@ static void css_subchannel_release(struct device *dev)
        kfree(sch);
 }
 
-struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
+static int css_validate_subchannel(struct subchannel_id schid,
+                                  struct schib *schib)
+{
+       int err;
+
+       switch (schib->pmcw.st) {
+       case SUBCHANNEL_TYPE_IO:
+       case SUBCHANNEL_TYPE_MSG:
+               if (!css_sch_is_valid(schib))
+                       err = -ENODEV;
+               else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+                       CIO_MSG_EVENT(6, "Blacklisted device detected "
+                                     "at devno %04X, subchannel set %x\n",
+                                     schib->pmcw.dev, schid.ssid);
+                       err = -ENODEV;
+               } else
+                       err = 0;
+               break;
+       default:
+               err = 0;
+       }
+       if (err)
+               goto out;
+
+       CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+                     schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+       return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+                                       struct schib *schib)
 {
        struct subchannel *sch;
        int ret;
 
+       ret = css_validate_subchannel(schid, schib);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
        sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);
 
-       ret = cio_validate_subchannel(sch, schid);
-       if (ret < 0)
-               goto err;
+       sch->schid = schid;
+       sch->schib = *schib;
+       sch->st = schib->pmcw.st;
 
        ret = css_sch_create_locks(sch);
        if (ret)
@@ -244,8 +280,7 @@ static void ssd_register_chpids(struct chsc_ssd_info *ssd)
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
-                       if (!chp_is_registered(ssd->chpid[i]))
-                               chp_new(ssd->chpid[i]);
+                       chp_new(ssd->chpid[i]);
        }
 }
 
@@ -382,12 +417,12 @@ int css_register_subchannel(struct subchannel *sch)
        return ret;
 }
 
-static int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
 {
        struct subchannel *sch;
        int ret;
 
-       sch = css_alloc_subchannel(schid);
+       sch = css_alloc_subchannel(schid, schib);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
 
@@ -436,23 +471,23 @@ EXPORT_SYMBOL_GPL(css_sch_is_valid);
 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 {
        struct schib schib;
+       int ccode;
 
        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
-       if (stsch(schid, &schib)) {
-               /* Subchannel is not provided. */
-               return -ENXIO;
-       }
-       if (!css_sch_is_valid(&schib)) {
-               /* Unusable - ignore. */
-               return 0;
-       }
-       CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
-                     schid.sch_no);
+       /*
+        * The first subchannel that is not-operational (ccode==3)
+        * indicates that there aren't any more devices available.
+        * If stsch gets an exception, it means the current subchannel set
+        * is not valid.
+        */
+       ccode = stsch(schid, &schib);
+       if (ccode)
+               return (ccode == 3) ? -ENXIO : ccode;
 
-       return css_probe_device(schid);
+       return css_probe_device(schid, &schib);
 }
 
 static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
@@ -1081,6 +1116,11 @@ static int __init channel_subsystem_init(void)
        if (ret)
                goto out_wq;
 
+       /* Register subchannels which are already in use. */
+       cio_register_early_subchannels();
+       /* Start initial subchannel evaluation. */
+       css_schedule_eval_all();
+
        return ret;
 out_wq:
        destroy_workqueue(cio_work_q);
@@ -1120,10 +1160,6 @@ int css_complete_work(void)
  */
 static int __init channel_subsystem_init_sync(void)
 {
-       /* Register subchannels which are already in use. */
-       cio_register_early_subchannels();
-       /* Start initial subchannel evaluation. */
-       css_schedule_eval_all();
        css_complete_work();
        return 0;
 }
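
Note: registering early (console) subchannels and scheduling the initial evaluation now happens at the end of channel_subsystem_init() itself; channel_subsystem_init_sync() is reduced to waiting for that work to complete. Assuming the initcall wiring is unchanged by this patch, that ordering is:

    subsys_initcall(channel_subsystem_init);
    subsys_initcall_sync(channel_subsystem_init_sync);
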
index 30357cbf350afbe95fd76189af3bc2fc59810d21..8d832900a63dd44081119f2905fd0d69321f4580 100644 (file)
@@ -103,7 +103,8 @@ extern void css_driver_unregister(struct css_driver *);
 
 extern void css_sch_device_unregister(struct subchannel *);
 extern int css_register_subchannel(struct subchannel *);
-extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+                                              struct schib *schib);
 extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int max_ssid;
index f4ca72dd862ff37db969271e56c0ecbbb095a199..9c7d9da42ba0829692d0d8dadbbd1f42935962f3 100644 (file)
@@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
        unsigned long phys_aob = 0;
 
        if (!q->use_cq)
-               goto out;
+               return 0;
 
        if (!q->aobs[bufnr]) {
                struct qaob *aob = qdio_allocate_aob();
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
-               q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
                q->sbal_state[bufnr].aob = q->aobs[bufnr];
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                WARN_ON_ONCE(phys_aob & 0xFF);
        }
 
-out:
+       q->sbal_state[bufnr].flags = 0;
        return phys_aob;
 }
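
Note: the buffer's sbal_state flags are now reset for every output buffer, with or without a CQ aob attached, so a stale pending flag from an earlier use of the slot cannot leak into the next transfer; the early return for the !use_cq case also replaces the goto-to-the-end dance, and the literal 0 stands in for the removed QDIO_OUTBUF_STATE_FLAG_NONE.
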
 
index 1f8d1c1e566de7749a2d3177ea24c415bba9a921..0ebb29b6fd6df2fa65b1cce91fc3e0478eb37345 100644 (file)
@@ -30,6 +30,17 @@ DECLARE_EVENT_CLASS(s390_class_schib,
                __field(u16, schno)
                __field(u16, devno)
                __field_struct(struct schib, schib)
+               __field(u8, pmcw_ena)
+               __field(u8, pmcw_st)
+               __field(u8, pmcw_dnv)
+               __field(u16, pmcw_dev)
+               __field(u8, pmcw_lpm)
+               __field(u8, pmcw_pnom)
+               __field(u8, pmcw_lpum)
+               __field(u8, pmcw_pim)
+               __field(u8, pmcw_pam)
+               __field(u8, pmcw_pom)
+               __field(u64, pmcw_chpid)
                __field(int, cc)
        ),
        TP_fast_assign(
@@ -38,18 +49,29 @@ DECLARE_EVENT_CLASS(s390_class_schib,
                __entry->schno = schid.sch_no;
                __entry->devno = schib->pmcw.dev;
                __entry->schib = *schib;
+               __entry->pmcw_ena = schib->pmcw.ena;
+               __entry->pmcw_st = schib->pmcw.st;
+               __entry->pmcw_dnv = schib->pmcw.dnv;
+               __entry->pmcw_dev = schib->pmcw.dev;
+               __entry->pmcw_lpm = schib->pmcw.lpm;
+               __entry->pmcw_pnom = schib->pmcw.pnom;
+               __entry->pmcw_lpum = schib->pmcw.lpum;
+               __entry->pmcw_pim = schib->pmcw.pim;
+               __entry->pmcw_pam = schib->pmcw.pam;
+               __entry->pmcw_pom = schib->pmcw.pom;
+               memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
                __entry->cc = cc;
        ),
        TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
                  "lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
                  "pom=0x%02x chpids=%016llx",
                  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
-                 __entry->schib.pmcw.ena, __entry->schib.pmcw.st,
-                 __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev,
-                 __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom,
-                 __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim,
-                 __entry->schib.pmcw.pam, __entry->schib.pmcw.pom,
-                 *((u64 *) __entry->schib.pmcw.chpid)
+                 __entry->pmcw_ena, __entry->pmcw_st,
+                 __entry->pmcw_dnv, __entry->pmcw_dev,
+                 __entry->pmcw_lpm, __entry->pmcw_pnom,
+                 __entry->pmcw_lpum, __entry->pmcw_pim,
+                 __entry->pmcw_pam, __entry->pmcw_pom,
+                 __entry->pmcw_chpid
        )
 );
 
@@ -89,6 +111,13 @@ TRACE_EVENT(s390_cio_tsch,
                __field(u8, ssid)
                __field(u16, schno)
                __field_struct(struct irb, irb)
+               __field(u8, scsw_dcc)
+               __field(u8, scsw_pno)
+               __field(u8, scsw_fctl)
+               __field(u8, scsw_actl)
+               __field(u8, scsw_stctl)
+               __field(u8, scsw_dstat)
+               __field(u8, scsw_cstat)
                __field(int, cc)
        ),
        TP_fast_assign(
@@ -96,15 +125,22 @@ TRACE_EVENT(s390_cio_tsch,
                __entry->ssid = schid.ssid;
                __entry->schno = schid.sch_no;
                __entry->irb = *irb;
+               __entry->scsw_dcc = scsw_cc(&irb->scsw);
+               __entry->scsw_pno = scsw_pno(&irb->scsw);
+               __entry->scsw_fctl = scsw_fctl(&irb->scsw);
+               __entry->scsw_actl = scsw_actl(&irb->scsw);
+               __entry->scsw_stctl = scsw_stctl(&irb->scsw);
+               __entry->scsw_dstat = scsw_dstat(&irb->scsw);
+               __entry->scsw_cstat = scsw_cstat(&irb->scsw);
                __entry->cc = cc;
        ),
        TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
                  "stctl=0x%x dstat=0x%x cstat=0x%x",
                  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
-                 scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw),
-                 scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw),
-                 scsw_stctl(&__entry->irb.scsw),
-                 scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw)
+                 __entry->scsw_dcc, __entry->scsw_pno,
+                 __entry->scsw_fctl, __entry->scsw_actl,
+                 __entry->scsw_stctl,
+                 __entry->scsw_dstat, __entry->scsw_cstat
        )
 );
 
@@ -122,6 +158,9 @@ TRACE_EVENT(s390_cio_tpi,
                __field(u8, cssid)
                __field(u8, ssid)
                __field(u16, schno)
+               __field(u8, adapter_IO)
+               __field(u8, isc)
+               __field(u8, type)
        ),
        TP_fast_assign(
                __entry->cc = cc;
@@ -136,11 +175,14 @@ TRACE_EVENT(s390_cio_tpi,
                __entry->cssid = __entry->tpi_info.schid.cssid;
                __entry->ssid = __entry->tpi_info.schid.ssid;
                __entry->schno = __entry->tpi_info.schid.sch_no;
+               __entry->adapter_IO = __entry->tpi_info.adapter_IO;
+               __entry->isc = __entry->tpi_info.isc;
+               __entry->type = __entry->tpi_info.type;
        ),
        TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
                  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
-                 __entry->tpi_info.adapter_IO, __entry->tpi_info.isc,
-                 __entry->tpi_info.type
+                 __entry->adapter_IO, __entry->isc,
+                 __entry->type
        )
 );
 
@@ -299,16 +341,20 @@ TRACE_EVENT(s390_cio_interrupt,
                __field(u8, cssid)
                __field(u8, ssid)
                __field(u16, schno)
+               __field(u8, isc)
+               __field(u8, type)
        ),
        TP_fast_assign(
                __entry->tpi_info = *tpi_info;
-               __entry->cssid = __entry->tpi_info.schid.cssid;
-               __entry->ssid = __entry->tpi_info.schid.ssid;
-               __entry->schno = __entry->tpi_info.schid.sch_no;
+               __entry->cssid = tpi_info->schid.cssid;
+               __entry->ssid = tpi_info->schid.ssid;
+               __entry->schno = tpi_info->schid.sch_no;
+               __entry->isc = tpi_info->isc;
+               __entry->type = tpi_info->type;
        ),
        TP_printk("schid=%x.%x.%04x isc=%d type=%d",
                  __entry->cssid, __entry->ssid, __entry->schno,
-                 __entry->tpi_info.isc, __entry->tpi_info.type
+                 __entry->isc, __entry->type
        )
 );
 
@@ -321,11 +367,13 @@ TRACE_EVENT(s390_cio_adapter_int,
        TP_ARGS(tpi_info),
        TP_STRUCT__entry(
                __field_struct(struct tpi_info, tpi_info)
+               __field(u8, isc)
        ),
        TP_fast_assign(
                __entry->tpi_info = *tpi_info;
+               __entry->isc = tpi_info->isc;
        ),
-       TP_printk("isc=%d", __entry->tpi_info.isc)
+       TP_printk("isc=%d", __entry->isc)
 );
 
 /**
@@ -339,16 +387,30 @@ TRACE_EVENT(s390_cio_stcrw,
        TP_STRUCT__entry(
                __field_struct(struct crw, crw)
                __field(int, cc)
+               __field(u8, slct)
+               __field(u8, oflw)
+               __field(u8, chn)
+               __field(u8, rsc)
+               __field(u8, anc)
+               __field(u8, erc)
+               __field(u16, rsid)
        ),
        TP_fast_assign(
                __entry->crw = *crw;
                __entry->cc = cc;
+               __entry->slct = crw->slct;
+               __entry->oflw = crw->oflw;
+               __entry->chn = crw->chn;
+               __entry->rsc = crw->rsc;
+               __entry->anc = crw->anc;
+               __entry->erc = crw->erc;
+               __entry->rsid = crw->rsid;
        ),
        TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
                  "rsid=0x%x",
-                 __entry->cc, __entry->crw.slct, __entry->crw.oflw,
-                 __entry->crw.chn, __entry->crw.rsc,  __entry->crw.anc,
-                 __entry->crw.erc, __entry->crw.rsid
+                 __entry->cc, __entry->slct, __entry->oflw,
+                 __entry->chn, __entry->rsc,  __entry->anc,
+                 __entry->erc, __entry->rsid
        )
 );
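
Note: each event still records the full structure (schib/irb/tpi_info/crw), but the interesting bits are now also latched into scalar __field()s in TP_fast_assign(), and TP_printk() references only those scalars. Userspace decoders such as perf and trace-cmd parse the print format out of the binary record and cannot evaluate helper calls like scsw_cc() or chase bitfields inside a recorded struct, so this makes the events decodable outside the kernel. A reduced sketch of the pattern:

    TRACE_EVENT(my_event,
            TP_PROTO(struct crw *crw),
            TP_ARGS(crw),
            TP_STRUCT__entry(
                    __field(u8, erc)                /* plain scalar: parseable */
            ),
            TP_fast_assign(
                    __entry->erc = crw->erc;        /* latch at trace time */
            ),
            TP_printk("erc=0x%x", __entry->erc)     /* no struct derefs here */
    );
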
 
index dce92b2a895d6ff3bbe38104ed08ea32c7979432..dbe7c7ac9ac8c8c4456f142b14c740d3bdc0c5e6 100644 (file)
 #define CCWCHAIN_LEN_MAX       256
 
 struct pfn_array {
+       /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
+       /* Array that stores PFNs of the pages need to pin. */
        unsigned long           *pa_iova_pfn;
+       /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
+       /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
 
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields of
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-       int i, ret;
-
-       if (pa->pa_nr <= 0) {
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-       for (i = 1; i < pa->pa_nr; i++)
-               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-       if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-       pa->pa_nr = 0;
-       kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
 {
-       int ret = 0;
+       int i, ret = 0;
 
        if (!len)
                return 0;
 
-       if (pa->pa_nr)
+       if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;
 
        pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-       ret = pfn_array_pin(pa, mdev);
+       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+       for (i = 1; i < pa->pa_nr; i++)
+               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-       if (ret > 0)
-               return ret;
-       else if (!ret)
+       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+       if (ret < 0) {
+               goto err_out;
+       } else if (ret > 0 && ret != pa->pa_nr) {
+               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
+               goto err_out;
+       }
 
+       return ret;
+
+err_out:
+       pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
+       pa->pa_iova_pfn = NULL;
 
        return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+       pa->pa_nr = 0;
+       kfree(pa->pa_iova_pfn);
+}
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
        do {
                cnt++;
 
+               /*
+                * As we don't want to fail direct addressing even if the
+                * orb specified one of the unsupported formats, we defer
+                * checking for IDAWs in unsupported formats to here.
+                */
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+                       return -EOPNOTSUPP;
+
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
 
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
-       int idaw_nr;
+       int ret;
 
        ccw = chain->ch_ccw + idx;
 
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
         * needed when translating a direct ccw to a idal ccw.
         */
        pat = chain->ch_pat + idx;
-       if (pfn_array_table_init(pat, 1))
-               return -ENOMEM;
-       idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
-                                     ccw->cda, ccw->count);
-       if (idaw_nr < 0)
-               return idaw_nr;
+       ret = pfn_array_table_init(pat, 1);
+       if (ret)
+               goto out_init;
+
+       ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+       if (ret < 0)
+               goto out_init;
 
        /* Translate this direct ccw to a idal ccw. */
-       idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+       idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
-               pfn_array_table_unpin_free(pat, cp->mdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unpin;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        pfn_array_table_idal_create_words(pat, idaws);
 
        return 0;
+
+out_unpin:
+       pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
+       return ret;
 }
 
 static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
-               return ret;
+               goto out_init;
 
        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
        kfree(idaws);
 out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
        return ret;
 }
 
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        /*
         * XXX:
         * Only support prefetch enable mode now.
-        * Only support 64bit addressing idal.
-        * Only support 4k IDAW.
         */
-       if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+       if (!orb->cmd.pfch)
                return -EOPNOTSUPP;
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);
+       /* It is safe to force c64 on: if it was not set but IDALs are
+        * used, ccwchain_calc_length() already returned an error.
+        */
+       cp->orb.cmd.c64 = 1;
 
        return ret;
 }
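
Note: pfn_array_pin() is folded into pfn_array_alloc_pin(), which now owns the whole allocate/fill/pin sequence and unwinds completely on failure (pa_nr back to 0, pa_iova_pfn freed and NULLed so a later unpin/free cannot touch stale memory), and both fetch paths zero ccw->cda on error so cp_free() will not try to unpin a half-translated chain. Checking for unsupported IDAW formats also moves from cp_init() into ccwchain_calc_length(), so programs using only direct addressing no longer fail just because the ORB advertised a format the translation cannot handle; c64 is then forced on for the real I/O. The vfio contract relied on here, as visible in the hunk:

    /* returns the number of pages pinned (> 0) or -errno;
     * a short pin is rolled back and treated as -EINVAL: */
    ret = vfio_pin_pages(mdev, iova_pfns, nr,
                         IOMMU_READ | IOMMU_WRITE, pfns);
    if (ret > 0 && ret != nr) {
            vfio_unpin_pages(mdev, iova_pfns, ret);
            ret = -EINVAL;
    }
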
index ea6a2d0b2894decac95c3421c544183ee89c3383..770fa9cfc31041dd84a78a00f0f4135bef5a79ed 100644 (file)
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
+       int rc = -EAGAIN;
 
        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+               rc = 0;
                goto out_unlock;
        }
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
+       rc = 0;
 
 out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);
 
-       return 0;
+       return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
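
Note: vfio_ccw_sch_event() now distinguishes "handled" (0) from "try again later" (-EAGAIN, returned when the device is not registered yet or work is still pending), presumably so the css event machinery can reschedule the evaluation instead of treating the event as consumed.
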
index 3c800642134e4330d62bb8c0053df62618840ff3..797a82731159a5f9f584810f924adc3467b1e702 100644 (file)
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
        struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
         */
        cio_disable_subchannel(sch);
 }
+static inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+       return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = &private->io_region;
        struct mdev_device *mdev = private->mdev;
+       char *errstr = "request";
 
        private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
+                       errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
-               if (io_region->ret_code)
+               if (io_region->ret_code) {
+                       errstr = "cp init";
                        goto err_out;
+               }
 
                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
+                       errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
+                       errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
        private->state = VFIO_CCW_STATE_IDLE;
+       trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+                              io_region->ret_code, errstr);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644 (file)
index 0000000..b1da53d
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+       TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+       TP_ARGS(fctl, schid, errno, errstr),
+
+       TP_STRUCT__entry(
+               __field(int, fctl)
+               __field_struct(struct subchannel_id, schid)
+               __field(int, errno)
+               __field(char*, errstr)
+       ),
+
+       TP_fast_assign(
+               __entry->fctl = fctl;
+               __entry->schid = schid;
+               __entry->errno = errno;
+               __entry->errstr = errstr;
+       ),
+
+       TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+                 __entry->schid.cssid,
+                 __entry->schid.ssid,
+                 __entry->schid.sch_no,
+                 __entry->fctl,
+                 __entry->errno,
+                 __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
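
Note: this is the standard self-defining tracepoint header: TRACE_INCLUDE_PATH is "." (which is what the CFLAGS_vfio_ccw_fsm.o := -I$(src) hunk in the cio Makefile above enables), and exactly one translation unit defines CREATE_TRACE_POINTS before including it so that define_trace.h emits the event bodies:

    /* in vfio_ccw_fsm.c, and only there: */
    #define CREATE_TRACE_POINTS
    #include "vfio_ccw_trace.h"

    /* call site, from fsm_io_request(): */
    trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
                           io_region->ret_code, errstr);
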
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
deleted file mode 100644 (file)
index 16b59ce..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * Adjunct processor bus inline assemblies.
- */
-
-#ifndef _AP_ASM_H_
-#define _AP_ASM_H_
-
-#include <asm/isc.h>
-
-/**
- * ap_intructions_available() - Test if AP instructions are available.
- *
- * Returns 0 if the AP instructions are installed.
- */
-static inline int ap_instructions_available(void)
-{
-       register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
-       register unsigned long reg1 asm ("1") = -ENODEV;
-       register unsigned long reg2 asm ("2") = 0UL;
-
-       asm volatile(
-               "   .long 0xb2af0000\n"         /* PQAP(TAPQ) */
-               "0: la    %1,0\n"
-               "1:\n"
-               EX_TABLE(0b, 1b)
-               : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
-       return reg1;
-}
-
-/**
- * ap_tapq(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
-{
-       register unsigned long reg0 asm ("0") = qid;
-       register struct ap_queue_status reg1 asm ("1");
-       register unsigned long reg2 asm ("2") = 0UL;
-
-       asm volatile(".long 0xb2af0000"         /* PQAP(TAPQ) */
-                    : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
-       if (info)
-               *info = reg2;
-       return reg1;
-}
-
-/**
- * ap_pqap_rapq(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
-{
-       register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
-       register struct ap_queue_status reg1 asm ("1");
-       register unsigned long reg2 asm ("2") = 0UL;
-
-       asm volatile(
-               ".long 0xb2af0000"              /* PQAP(RAPQ) */
-               : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
-       return reg1;
-}
-
-/**
- * ap_aqic(): Control interruption for a specific AP.
- * @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
-                                            struct ap_qirq_ctrl qirqctrl,
-                                            void *ind)
-{
-       register unsigned long reg0 asm ("0") = qid | (3UL << 24);
-       register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
-       register struct ap_queue_status reg1_out asm ("1");
-       register void *reg2 asm ("2") = ind;
-
-       asm volatile(
-               ".long 0xb2af0000"              /* PQAP(AQIC) */
-               : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
-               :
-               : "cc");
-       return reg1_out;
-}
-
-/**
- * ap_qci(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int ap_qci(void *config)
-{
-       register unsigned long reg0 asm ("0") = 0x04000000UL;
-       register unsigned long reg1 asm ("1") = -EINVAL;
-       register void *reg2 asm ("2") = (void *) config;
-
-       asm volatile(
-               ".long 0xb2af0000\n"            /* PQAP(QCI) */
-               "0: la    %1,0\n"
-               "1:\n"
-               EX_TABLE(0b, 1b)
-               : "+d" (reg0), "+d" (reg1), "+d" (reg2)
-               :
-               : "cc", "memory");
-
-       return reg1;
-}
-
-/*
- * union ap_qact_ap_info - used together with the
- * ap_aqic() function to provide a convenient way
- * to handle the ap info needed by the qact function.
- */
-union ap_qact_ap_info {
-       unsigned long val;
-       struct {
-               unsigned int      : 3;
-               unsigned int mode : 3;
-               unsigned int      : 26;
-               unsigned int cat  : 8;
-               unsigned int      : 8;
-               unsigned char ver[2];
-       };
-};
-
-/**
- * ap_qact(): Query AP combatibility type.
- * @qid: The AP queue number
- * @apinfo: On input the info about the AP queue. On output the
- *         alternate AP queue info provided by the qact function
- *         in GR2 is stored in.
- *
- * Returns AP queue status. Check response_code field for failures.
- */
-static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
-                                            union ap_qact_ap_info *apinfo)
-{
-       register unsigned long reg0 asm ("0") = qid | (5UL << 24)
-               | ((ifbit & 0x01) << 22);
-       register unsigned long reg1_in asm ("1") = apinfo->val;
-       register struct ap_queue_status reg1_out asm ("1");
-       register unsigned long reg2 asm ("2") = 0;
-
-       asm volatile(
-               ".long 0xb2af0000"              /* PQAP(QACT) */
-               : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
-               : : "cc");
-       apinfo->val = reg2;
-       return reg1_out;
-}
-
-/**
- * ap_nqap(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
-                                            unsigned long long psmid,
-                                            void *msg, size_t length)
-{
-       register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
-       register struct ap_queue_status reg1 asm ("1");
-       register unsigned long reg2 asm ("2") = (unsigned long) msg;
-       register unsigned long reg3 asm ("3") = (unsigned long) length;
-       register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
-       register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
-       asm volatile (
-               "0: .long 0xb2ad0042\n"         /* NQAP */
-               "   brc   2,0b"
-               : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
-               : "d" (reg4), "d" (reg5)
-               : "cc", "memory");
-       return reg1;
-}
-
-/**
- * ap_dqap(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
-                                            unsigned long long *psmid,
-                                            void *msg, size_t length)
-{
-       register unsigned long reg0 asm("0") = qid | 0x80000000UL;
-       register struct ap_queue_status reg1 asm ("1");
-       register unsigned long reg2 asm("2") = 0UL;
-       register unsigned long reg4 asm("4") = (unsigned long) msg;
-       register unsigned long reg5 asm("5") = (unsigned long) length;
-       register unsigned long reg6 asm("6") = 0UL;
-       register unsigned long reg7 asm("7") = 0UL;
-
-
-       asm volatile(
-               "0: .long 0xb2ae0064\n"         /* DQAP */
-               "   brc   6,0b\n"
-               : "+d" (reg0), "=d" (reg1), "+d" (reg2),
-                 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
-               : : "cc", "memory");
-       *psmid = (((unsigned long long) reg6) << 32) + reg7;
-       return reg1;
-}
-
-#endif /* _AP_ASM_H_ */
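
For reference, the removed ap_nqap()/ap_dqap() helpers pass the 64-bit program supplied message id as two 32-bit register halves (reg4/reg5 on send, reg6/reg7 on receive). A minimal user-space sketch of that split-and-recombine step, with a hypothetical message id:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t psmid = 0x0123456789abcdefULL;         /* hypothetical id */
        uint32_t hi = (uint32_t)(psmid >> 32);          /* send: one register */
        uint32_t lo = (uint32_t)(psmid & 0xffffffff);   /* send: the other */
        uint64_t back = ((uint64_t)hi << 32) | lo;      /* receive side */

        assert(back == psmid);
        return 0;
}
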
index 35a0c2b52f823777bc2dc72820c8d8a97195211b..bf27fc4d1335911850dd073ae13a42bf43101c4b 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/debugfs.h>
 
 #include "ap_bus.h"
-#include "ap_asm.h"
 #include "ap_debug.h"
 
 /*
@@ -174,24 +173,6 @@ static inline int ap_qact_available(void)
        return 0;
 }
 
-/**
- * ap_test_queue(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
-                                    int tbit,
-                                    unsigned long *info)
-{
-       if (tbit)
-               qid |= 1UL << 23; /* set T bit */
-       return ap_tapq(qid, info);
-}
-EXPORT_SYMBOL(ap_test_queue);
-
 /*
  * ap_query_configuration(): Fetch cryptographic config info
  *
@@ -200,7 +181,7 @@ EXPORT_SYMBOL(ap_test_queue);
  * is returned, e.g. if the PQAP(QCI) instruction is not
  * available, the return value will be -EOPNOTSUPP.
  */
-int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_query_configuration(struct ap_config_info *info)
 {
        if (!ap_configuration_available())
                return -EOPNOTSUPP;
@@ -493,7 +474,7 @@ static int ap_poll_thread_start(void)
                return 0;
        mutex_lock(&ap_poll_thread_mutex);
        ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
-       rc = PTR_RET(ap_poll_kthread);
+       rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
        if (rc)
                ap_poll_kthread = NULL;
        mutex_unlock(&ap_poll_thread_mutex);
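
The PTR_RET() to PTR_ERR_OR_ZERO() conversions in this file are a rename with unchanged semantics: collapse an ERR_PTR-style pointer into 0 on success or the encoded negative errno on failure. A simplified stand-alone sketch of those semantics; the is_err()/ptr_err() helpers below are reduced stand-ins for the kernel's <linux/err.h> macros:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Reduced stand-in for the kernel's IS_ERR(). */
static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Reduced stand-in for the kernel's PTR_ERR(). */
static long ptr_err(const void *ptr)
{
        return (long)ptr;
}

/* What PTR_ERR_OR_ZERO() does: 0 for a valid pointer, -errno otherwise. */
static long ptr_err_or_zero(const void *ptr)
{
        return is_err(ptr) ? ptr_err(ptr) : 0;
}

int main(void)
{
        int obj = 0;
        void *ok = &obj;
        void *bad = (void *)(long)-12;  /* like ERR_PTR(-ENOMEM) */

        printf("%ld %ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(bad));
        return 0;
}
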
@@ -1261,7 +1242,7 @@ static int __init ap_module_init(void)
 
        /* Create /sys/devices/ap. */
        ap_root_device = root_device_register("ap");
-       rc = PTR_RET(ap_root_device);
+       rc = PTR_ERR_OR_ZERO(ap_root_device);
        if (rc)
                goto out_bus;
 
index 6a273c5ebca5b00475091c8d5b75543a13f97fee..936541937e15d18016ee1debcec0208a30a6a3a3 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/types.h>
+#include <asm/isc.h>
 #include <asm/ap.h>
 
 #define AP_DEVICES 256         /* Number of AP devices. */
index 2c726df210f664bff4ead0693310d9f48806a0e6..c13e43292cb7400a0412951aabb42c7aac8a7e20 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/facility.h>
 
 #include "ap_bus.h"
-#include "ap_asm.h"
 
 /*
  * AP card related attributes.
index ba3a2e13b0ebe252effaa866910421234518e57d..e365171fe28f3b927d9fc7efde8abf259b982954 100644 (file)
 #include <asm/facility.h>
 
 #include "ap_bus.h"
-#include "ap_asm.h"
-
-/**
- * ap_queue_irq_ctrl(): Control interruption on an AP queue.
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- *
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
- */
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
-                                        struct ap_qirq_ctrl qirqctrl,
-                                        void *ind)
-{
-       return ap_aqic(qid, qirqctrl, ind);
-}
-EXPORT_SYMBOL(ap_queue_irq_ctrl);
 
 /**
  * ap_queue_enable_interruption(): Enable interruption on an AP queue.
index 3929c8be8098b4098d5752d6697db506796fa9b4..e663432395c1da374f47187156d50fb9cdf36f72 100644 (file)
@@ -699,7 +699,7 @@ static int query_crypto_facility(u16 cardnr, u16 domain,
        /* fill request cprb param block with FQ request */
        preqparm = (struct fqreqparm *) preqcblk->req_parmb;
        memcpy(preqparm->subfunc_code, "FQ", 2);
-       strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+       memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
        preqparm->rule_array_len =
                sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
        preqparm->lv1.len = sizeof(preqparm->lv1);
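
The strncpy() to memcpy() switch above reflects that rule_array is a fixed-width request field rather than a NUL-terminated string: strncpy() stops copying at the source's terminating NUL and zero-pads the remainder, while memcpy() copies exactly sizeof(field) raw bytes. A small stand-alone illustration, with hypothetical sizes and contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char rule_array[8];                     /* fixed-width field */
        const char keyword[8] = "FQ\0\0ABCD";   /* real data after the NUL */

        strncpy(rule_array, keyword, sizeof(rule_array));
        /* strncpy stopped at the NUL: bytes 4 and 5 are zero padding. */
        printf("strncpy: %02x %02x\n",
               (unsigned char)rule_array[4], (unsigned char)rule_array[5]);

        memcpy(rule_array, keyword, sizeof(rule_array));
        /* memcpy copied all 8 bytes, including those after the NUL. */
        printf("memcpy:  %02x %02x\n",
               (unsigned char)rule_array[4], (unsigned char)rule_array[5]);
        return 0;
}
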
index 233e1e695208b9b870edb4259127c41e2ee3357a..da2c8dfd4d741edc6c8b380e850667f79a44440f 100644 (file)
@@ -83,9 +83,21 @@ static ssize_t zcrypt_card_online_store(struct device *dev,
 static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
                   zcrypt_card_online_store);
 
+static ssize_t zcrypt_card_load_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_card_load_show, NULL);
+
 static struct attribute *zcrypt_card_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_online.attr,
+       &dev_attr_load.attr,
        NULL,
 };
 
index 011d61d8a4ae5869e7d41d1654a218c5f53f96bc..1752622b95f701eca7f1f3604251929983117d60 100644 (file)
@@ -99,7 +99,7 @@ struct cca_pvt_ext_CRT_sec {
  * @mex: pointer to user input data
  * @p: pointer to memory area for the key
  *
- * Returns the size of the key area or -EFAULT
+ * Returns the size of the key area or a negative errno value.
  */
 static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
 {
@@ -118,6 +118,15 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
        unsigned char *temp;
        int i;
 
+       /*
+        * The inputdatalength was a selection criterion in the dispatching
+        * function zcrypt_rsa_modexpo(). However, do a plausibility check
+        * here to make sure the following copy_from_user() can't be utilized
+        * to compromise the system.
+        */
+       if (WARN_ON_ONCE(mex->inputdatalength > 512))
+               return -EINVAL;
+
        memset(key, 0, sizeof(*key));
 
        key->pubHdr = static_pub_hdr;
@@ -178,6 +187,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
        struct cca_public_sec *pub;
        int short_len, long_len, pad_len, key_len, size;
 
+       /*
+        * The inputdatalength was a selection criterion in the dispatching
+        * function zcrypt_rsa_crt(). However, do a plausibility check
+        * here to make sure the following copy_from_user() can't be utilized
+        * to compromise the system.
+        */
+       if (WARN_ON_ONCE(crt->inputdatalength > 512))
+               return -EINVAL;
+
        memset(key, 0, sizeof(*key));
 
        short_len = (crt->inputdatalength + 1) / 2;
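
The WARN_ON_ONCE() guards added above follow a common defensive pattern: even though the dispatcher already selected on inputdatalength, the function performing the copy re-checks the user-supplied length against its own buffer before copying. A generic user-space sketch of the pattern; the 512-byte limit mirrors the check above, names are hypothetical, and memcpy() stands in for copy_from_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KEY_AREA_MAX 512        /* upper bound the buffer was sized for */

/* Re-validate the length at the point of use, not only in the caller. */
static int build_key(const unsigned char *src, size_t len,
                     unsigned char *dst, size_t dst_size)
{
        if (len > KEY_AREA_MAX || len > dst_size)
                return -EINVAL; /* plausibility check failed */

        memcpy(dst, src, len);  /* stands in for copy_from_user() */
        return 0;
}

int main(void)
{
        unsigned char in[16] = "user data";
        unsigned char out[KEY_AREA_MAX];

        printf("ok:  %d\n", build_key(in, sizeof(in), out, sizeof(out)));
        printf("bad: %d\n", build_key(in, 4096, out, sizeof(out)));
        return 0;
}
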
index 97d4bacbc442209ddd6a6645d7e18db5d942da16..e70ae078c86b8542fa5d3c2799583bf8b55775f7 100644 (file)
@@ -246,7 +246,7 @@ int speed_idx_ep11(int req_type)
  * @ap_msg: pointer to AP message
  * @mex: pointer to user input data
  *
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
  */
 static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
                                       struct ap_message *ap_msg,
@@ -272,6 +272,14 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
        } __packed * msg = ap_msg->message;
        int size;
 
+       /*
+        * The inputdatalength was a selection criterion in the dispatching
+        * function zcrypt_rsa_modexpo(). However, make sure the following
+        * copy_from_user() never exceeds the allocated buffer space.
+        */
+       if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+               return -EINVAL;
+
        /* VUD.ciphertext */
        msg->length = mex->inputdatalength + 2;
        if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
@@ -307,7 +315,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
  * @ap_msg: pointer to AP message
  * @crt: pointer to user input data
  *
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
  */
 static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
                                       struct ap_message *ap_msg,
@@ -334,6 +342,14 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
        } __packed * msg = ap_msg->message;
        int size;
 
+       /*
+        * The inputdatalength was a selection criterion in the dispatching
+        * function zcrypt_rsa_crt(). However, make sure the following
+        * copy_from_user() never exceeds the allocated buffer space.
+        */
+       if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+               return -EINVAL;
+
        /* VUD.ciphertext */
        msg->length = crt->inputdatalength + 2;
        if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
index 720434e18007e3a8e1c9e5228c4841ba0cb782a5..91a52f26835307f3b04c54a8bb9544fe694495a2 100644 (file)
@@ -75,8 +75,20 @@ static ssize_t zcrypt_queue_online_store(struct device *dev,
 static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
                   zcrypt_queue_online_store);
 
+static ssize_t zcrypt_queue_load_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_queue_load_show, NULL);
+
 static struct attribute *zcrypt_queue_attrs[] = {
        &dev_attr_online.attr,
+       &dev_attr_load.attr,
        NULL,
 };
 
index 2a5fec55bf60f6f30fd684e1c8c5c9a594aa8e75..a246a618f9a497047e4a81614f38da1eb295ef0b 100644 (file)
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                         unsigned int elements)
+{
+       unsigned int i;
+
+       for (i = 0; i < elements; i++)
+               memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+       buf->element[14].sflags = 0;
+       buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -     find number of SBALEs to cover range.
  * @start:                             Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
index 8e1474f1ffacfb22b773b02aa1bff6ff91c61ce9..d01ac29fd986d82b84b7215c5268c37e94aaadaa 100644 (file)
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
+       unsigned int i;
 
        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
        QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        qeth_notify_skbs(buffer->q, buffer, notification);
 
        buffer->aob = NULL;
-       qeth_clear_output_buffer(buffer->q, buffer,
-                                QETH_QDIO_BUF_HANDLED_DELAYED);
+       /* Free dangling allocations. The attached skbs are handled by
+        * qeth_cleanup_handled_pending().
+        */
+       for (i = 0;
+            i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+            i++) {
+               if (aob->sba[i] && buffer->is_header[i])
+                       kmem_cache_free(qeth_core_header_cache,
+                                       (void *) aob->sba[i]);
+       }
+       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-       /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
 
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                        QETH_CARD_TEXT(queue->card, 5, "aob");
                        QETH_CARD_TEXT_(queue->card, 5, "%lx",
                                        virt_to_phys(buffer->aob));
+
+                       /* prepare the queue slot for re-use: */
+                       qeth_scrub_qdio_buffer(buffer->buffer,
+                                              QETH_MAX_BUFFER_ELEMENTS(card));
                        if (qeth_init_qdio_out_buf(queue, bidx)) {
                                QETH_CARD_TEXT(card, 2, "outofbuf");
                                qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_RDEV(card), &id);
+       ccw_device_get_id(CARD_DDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                          NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev:       the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:       a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-       netdev_features_t features = dev->features;
        struct qeth_card *card = dev->ml_priv;
+       netdev_features_t features;
 
+       rtnl_lock();
+       features = dev->features;
        /* force-off any feature that needs an IPA sequence.
         * netdev_update_features() will restart them.
         */
        dev->features &= ~QETH_HW_FEATURES;
        netdev_update_features(dev);
-
-       if (features == dev->features)
-               return;
-       dev_warn(&card->gdev->dev,
-                "Device recovery failed to restore all offload features\n");
+       if (features != dev->features)
+               dev_warn(&card->gdev->dev,
+                        "Device recovery failed to restore all offload features\n");
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
index a7cb37da6a21313eda8d03119135f1475d35f47d..2487f0aeb165c1afae905540d1ff547f7fab4f54 100644 (file)
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
 
+       /* avoid racing against concurrent state change: */
+       if (!mutex_trylock(&card->conf_mutex))
+               return -EAGAIN;
+
        if (!qeth_card_hw_is_reachable(card)) {
                ether_addr_copy(dev->dev_addr, addr->sa_data);
-               return 0;
+               goto out_unlock;
        }
 
        /* don't register the same address twice */
        if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
            (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
+               goto out_unlock;
 
        /* add the new address, switch over, drop the old */
        rc = qeth_l2_send_setmac(card, addr->sa_data);
        if (rc)
-               return rc;
+               goto out_unlock;
        ether_addr_copy(old_addr, dev->dev_addr);
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                qeth_l2_remove_mac(card, old_addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-       return 0;
+
+out_unlock:
+       mutex_unlock(&card->conf_mutex);
+       return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode &&
                    card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
-               rtnl_lock();
-               qeth_recover_features(card->dev);
-               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
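
The qeth_l2_set_mac_address() hunk above fails fast with -EAGAIN via mutex_trylock() instead of blocking while a concurrent state change holds conf_mutex. The same fail-fast locking shape in a stand-alone sketch, with pthreads as a stand-in for kernel mutexes:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Fail fast instead of sleeping when a concurrent state change is running. */
static int set_address(const char *addr)
{
        if (pthread_mutex_trylock(&conf_mutex) != 0)
                return -EAGAIN; /* caller can simply retry later */

        printf("applying %s\n", addr);  /* stands in for the real update */

        pthread_mutex_unlock(&conf_mutex);
        return 0;
}

int main(void)
{
        return set_address("02:00:00:00:00:01") ? 1 : 0;
}
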
index e7fa479adf47e0dd41bfacaed8fd347bc40b5581..5905dc63e2569baf761611ad25bf3b91786a3235 100644 (file)
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_on(card->dev);
        else
                netif_carrier_off(card->dev);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                rtnl_lock();
                if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                else
                        dev_open(card->dev);
                qeth_l3_set_rx_mode(card->dev);
-               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index a3a8c8d9d7171a8d6994548212084ff3380ba493..94f4d8fe85e0e5bc595470d8d0e161ab664ae735 100644 (file)
@@ -101,7 +101,7 @@ static void __init zfcp_init_device_setup(char *devstr)
        token = strsep(&str, ",");
        if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
                goto err_out;
-       strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+       strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
 
        token = strsep(&str, ",");
        if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
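
On the strncpy() to strlcpy() change above: strncpy() only NUL-terminates when the source is shorter than the size argument, while strlcpy() terminates unconditionally, which keeps the copy safe even if the preceding length check ever changes. strlcpy() is a BSD/kernel interface rather than ISO C; a minimal reimplementation makes the guarantee explicit (buffer size hypothetical):

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy: always NUL-terminates when size > 0 (BSD semantics). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';  /* the guarantee strncpy does not give */
        }
        return len;     /* callers can detect truncation: len >= size */
}

int main(void)
{
        char busid[8];  /* stands in for ZFCP_BUS_ID_SIZE, value hypothetical */

        my_strlcpy(busid, "0.0.1234.extra", sizeof(busid));
        printf("%s\n", busid);  /* truncated but safely terminated */
        return 0;
}
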
index a9831bd37a73d52462489d025c1631a576ca2fbc..a57f3a7d47488e5aac06d24e47b14eb90fc9615c 100644 (file)
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
        u32 lun_count, nexus;
        u32 i, bus, target;
        u8 expose_flag, attribs;
-       u8 devtype;
 
        lun_count = aac_get_safw_phys_lun_count(dev);
 
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
                        continue;
 
                if (expose_flag != 0) {
-                       devtype = AAC_DEVTYPE_RAID_MEMBER;
-                       goto update_devtype;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_RAID_MEMBER;
+                       continue;
                }
 
                if (nexus != 0 && (attribs & 8)) {
-                       devtype = AAC_DEVTYPE_NATIVE_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_NATIVE_RAW;
                        dev->hba_map[bus][target].rmw_nexus =
                                        nexus;
                } else
-                       devtype = AAC_DEVTYPE_ARC_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_ARC_RAW;
 
                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
                aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
-               dev->hba_map[bus][target].devtype = devtype;
        }
 }
 
index 2a3977823812cce8889e6577f65d3f862b626918..a39be94d110cdad1fc3153a58641c6d9f5a08eab 100644 (file)
@@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv)
 {
        const struct cxlflash_backend_ops *ops = NULL;
 
-#ifdef CONFIG_OCXL
+#ifdef CONFIG_OCXL_BASE
        if (ddv->flags & CXLFLASH_OCXL_DEV)
                ops = &cxlflash_ocxl_ops;
 #endif
 
-#ifdef CONFIG_CXL
+#ifdef CONFIG_CXL_BASE
        if (!(ddv->flags & CXLFLASH_OCXL_DEV))
                ops = &cxlflash_cxl_ops;
 #endif
index 0a95b5f253807888a8fa680397e5e344236d81d6..a43d44e7e7dd86aece1d9f2545386921878d129a 100644 (file)
@@ -88,10 +88,8 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                                      const struct file_operations *fops,
                                      void *priv, int flags)
 {
-       struct qstr this;
-       struct path path;
        struct file *file;
-       struct inode *inode = NULL;
+       struct inode *inode;
        int rc;
 
        if (fops->owner && !try_module_get(fops->owner)) {
@@ -116,33 +114,18 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                goto err3;
        }
 
-       this.name = name;
-       this.len = strlen(name);
-       this.hash = 0;
-       path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
-       if (!path.dentry) {
-               dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
-               rc = -ENOMEM;
-               goto err4;
-       }
-
-       path.mnt = mntget(ocxlflash_vfs_mount);
-       d_instantiate(path.dentry, inode);
-
-       file = alloc_file(&path, OPEN_FMODE(flags), fops);
+       file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
+                                flags & (O_ACCMODE | O_NONBLOCK), fops);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                dev_err(dev, "%s: alloc_file failed rc=%d\n",
                        __func__, rc);
-               goto err5;
+               goto err4;
        }
 
-       file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = priv;
 out:
        return file;
-err5:
-       path_put(&path);
 err4:
        iput(inode);
 err3:
index ea23c8dffc252af3d0209167e99118c98575d848..ffec695e0bfb2309c8c2b658c42f964dd5b5a996 100644 (file)
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        case ELS_LOGO:
                if (fip->mode == FIP_MODE_VN2VN) {
                        if (fip->state != FIP_ST_VNMP_UP)
-                               return -EINVAL;
+                               goto drop;
                        if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
-                               return -EINVAL;
+                               goto drop;
                } else {
                        if (fip->state != FIP_ST_ENABLED)
                                return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        fip->send(fip, skb);
        return -EINPROGRESS;
 drop:
-       kfree_skb(skb);
        LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
                        op, ntoh24(fh->fh_d_id));
+       kfree_skb(skb);
        return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
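
The reordering in the drop path above is not cosmetic: the debug message reads fh->fh_d_id, and fh points into the skb's data, so logging after kfree_skb() would touch freed memory. The general use-then-free discipline in a stand-alone sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
        unsigned char d_id[3];  /* stands in for the FC header fields */
};

int main(void)
{
        struct frame *fh = malloc(sizeof(*fh));

        if (!fh)
                return 1;
        memcpy(fh->d_id, "\x01\x02\x03", 3);

        /* Correct order: consume the buffer's fields, then free it. */
        printf("drop d_id %02x%02x%02x\n",
               fh->d_id[0], fh->d_id[1], fh->d_id[2]);
        free(fh);       /* freeing first would make the printf a use-after-free */
        return 0;
}
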
index 15c7f3b6f35eecee2ca3ca88c16c63809c2791bf..58bb70b886d70d714ee6b448843aa113c2b313c0 100644 (file)
@@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
        struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
        u16 bmic_device_index = 0;
 
-       bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
-
-       encl_dev->sas_address =
+       encl_dev->eli =
                hpsa_get_enclosure_logical_identifier(h, scsi3addr);
 
+       bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+
        if (encl_dev->target == -1 || encl_dev->lun == -1) {
                rc = IO_OK;
                goto out;
@@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
 static int
 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
-       *identifier = rphy->identify.sas_address;
+       struct Scsi_Host *shost = phy_to_shost(rphy);
+       struct ctlr_info *h;
+       struct hpsa_scsi_dev_t *sd;
+
+       if (!shost)
+               return -ENXIO;
+
+       h = shost_to_hba(shost);
+
+       if (!h)
+               return -ENXIO;
+
+       sd = hpsa_find_device_by_sas_rphy(h, rphy);
+       if (!sd)
+               return -ENXIO;
+
+       *identifier = sd->eli;
+
        return 0;
 }
 
index fb9f5e7f8209447771d07016bca7924774b143af..59e023696fffe96d3fb7869de5233161dce8fa1d 100644 (file)
@@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t {
 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
        unsigned char device_id[16];    /* from inquiry pg. 0x83 */
        u64 sas_address;
+       u64 eli;                        /* enclosure logical id, from report diags */
        unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
        unsigned char model[16];        /* bytes 16-31 of inquiry data */
        unsigned char rev;              /* byte 2 of inquiry data */
index 0a9b8b387bd2e70e87310ef7908012a46f32942f..02d65dce74e504230ceb3080b58972d8d5dff950 100644 (file)
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
 
        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
                ioa_cfg->hrrq[i].allow_interrupts = 1;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
        if (ioa_cfg->sis64) {
                /* Set the adapter to the correct endian mode. */
                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
index 31d31aad3de1d3fd0f2ff58d2141cabddec474bf..89b1f1af2fd456c38e45e0a905af9a76385c1d89 100644 (file)
@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
+               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index d6093838f5f203dfc0968f2c2ee34f402af96093..c972cc2b3d5b7962e36bb6a428bd2f2795f34358 100644 (file)
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
                 */
                if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
                        iscsi_conn_printk(KERN_INFO, conn,
-                                         "task [op %x/%x itt "
+                                         "task [op %x itt "
                                          "0x%x/0x%x] "
                                          "rejected.\n",
-                                         task->hdr->opcode, opcode,
-                                         task->itt, task->hdr_itt);
+                                         opcode, task->itt,
+                                         task->hdr_itt);
                        return -EACCES;
                }
                /*
@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
                 */
                if (conn->session->fast_abort) {
                        iscsi_conn_printk(KERN_INFO, conn,
-                                         "task [op %x/%x itt "
+                                         "task [op %x itt "
                                          "0x%x/0x%x] fast abort.\n",
-                                         task->hdr->opcode, opcode,
-                                         task->itt, task->hdr_itt);
+                                         opcode, task->itt,
+                                         task->hdr_itt);
                        return -EACCES;
                }
                break;
index 569392d0d4c9e478b8e7d1fc745cb7e741005ff3..e44c91edf92d0fae3e73dd38f2b65961a07e5ed9 100644 (file)
@@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
                                        spinlock_t *writeq_lock)
 {
        unsigned long flags;
-       __u64 data_out = b;
 
        spin_lock_irqsave(writeq_lock, flags);
-       writel((u32)(data_out), addr);
-       writel((u32)(data_out >> 32), (addr + 4));
+       __raw_writel((u32)(b), addr);
+       __raw_writel((u32)(b >> 32), (addr + 4));
        mmiowb();
        spin_unlock_irqrestore(writeq_lock, flags);
 }
@@ -3367,7 +3366,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
-       writeq(b, addr);
+       __raw_writeq(b, addr);
+       mmiowb();
 }
 #else
 static inline void
@@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        /* send message 32-bits at a time */
        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
-               writel((u32)(request[i]), &ioc->chip->Doorbell);
+               writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
                if ((_base_wait_for_doorbell_ack(ioc, 5)))
                        failed = 1;
        }
@@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        /* read the first two 16-bits, it gives the total length of the reply */
-       reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                        ioc->name, __LINE__);
                return -EFAULT;
        }
-       reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                if (i >=  reply_bytes/2) /* overflow case */
                        readl(&ioc->chip->Doorbell);
                else
-                       reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+                       reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
                            & MPI2_DOORBELL_DATA_MASK);
                writel(0, &ioc->chip->HostInterruptStatus);
        }
index 90394cef0f414cdac6a5990ecb0bbc5ed9961a89..0a5dd5595dd3c42179543d8453e9d3afc98ba3f7 100644 (file)
@@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
        init_completion(&qedf->flogi_compl);
 
+       status = qed_ops->common->update_drv_state(qedf->cdev, true);
+       if (status)
+               QEDF_ERR(&(qedf->dbg_ctx),
+                       "Failed to send drv state to MFW.\n");
+
        memset(&link_params, 0, sizeof(struct qed_link_params));
        link_params.link_up = true;
        status = qed_ops->common->set_link(qedf->cdev, &link_params);
@@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
        struct qedf_ctx *qedf;
+       int rc;
 
        if (!pdev) {
                QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
                qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
                pci_set_drvdata(pdev, NULL);
        }
+
+       rc = qed_ops->common->update_drv_state(qedf->cdev, false);
+       if (rc)
+               QEDF_ERR(&(qedf->dbg_ctx),
+                       "Failed to send drv state to MFW.\n");
+
        qed_ops->common->slowpath_stop(qedf->cdev);
        qed_ops->common->remove(qedf->cdev);
 
index cf274a79e77aac86d338d358a71a753636002812..cff83b9457f7fe95bb39784996fedc06b21969ee 100644 (file)
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
        ipv6_en = !!(block->generic.ctrl_flags &
                     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
 
-       snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+       snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
                 block->target[index].target_name.byte);
 
        tgt->ipv6_en = ipv6_en;
@@ -2273,6 +2273,7 @@ kset_free:
 static void __qedi_remove(struct pci_dev *pdev, int mode)
 {
        struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+       int rval;
 
        if (qedi->tmf_thread) {
                flush_workqueue(qedi->tmf_thread);
@@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
        if (mode == QEDI_MODE_NORMAL)
                qedi_free_iscsi_pf_param(qedi);
 
+       rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+       if (rval)
+               QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
+
        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->common->slowpath_stop(qedi->cdev);
                qedi_ops->common->remove(qedi->cdev);
@@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
                if (qedi_setup_boot_info(qedi))
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "No iSCSI boot target configured\n");
+
+               rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
+               if (rc)
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Failed to send drv state to MFW\n");
+
        }
 
        return 0;
index 89a4999fa631f213b52ac1ca15eacba2f9b48f93..c8731568f9c477788b7cc0507730c0ae4ec4b7dd 100644 (file)
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                msleep(1000);
 
        qla24xx_disable_vp(vha);
+       qla2x00_wait_for_sess_deletion(vha);
 
        vha->flags.delete_progress = 1;
 
index 9442e18aef6fdbf818ba5999e0773417fde9149a..0f94b1d62d3f2f8611fb5a0924bf374b368a7028 100644 (file)
@@ -361,6 +361,8 @@ struct ct_arg {
        dma_addr_t      rsp_dma;
        u32             req_size;
        u32             rsp_size;
+       u32             req_allocated_size;
+       u32             rsp_allocated_size;
        void            *req;
        void            *rsp;
        port_id_t       id;
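
The two fields added above let the driver record, at allocation time, the size actually passed to dma_alloc_coherent(); the later hunks then free with the recorded value instead of a hard-coded sizeof(struct ct_sns_pkt), so the alloc and free paths cannot drift apart. The pattern in miniature, with malloc()/free() standing in for the DMA API:

#include <stdlib.h>

struct buf {
        void *data;
        size_t allocated_size;  /* recorded once, next to the pointer */
};

static int buf_alloc(struct buf *b, size_t size)
{
        b->data = malloc(size);
        if (!b->data)
                return -1;
        b->allocated_size = size;       /* the free path reuses this value */
        return 0;
}

static void buf_free(struct buf *b)
{
        /* No independent sizeof() here, so alloc and free cannot disagree;
         * a real dma_free_coherent() needs the size passed back exactly. */
        free(b->data);
        b->data = NULL;
        b->allocated_size = 0;
}

int main(void)
{
        struct buf b;

        if (buf_alloc(&b, 2048) == 0)
                buf_free(&b);
        return 0;
}
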
index f68eb60965592e2539caf409380a753cbe832a4e..2660a48d918a5cdd9d7a27863736a854b5d64345 100644 (file)
@@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
index 4bc2b66b299f234b098fdecb2e80e53555b9d684..7a3744006419589232322520594e635bdd247d36 100644 (file)
@@ -556,7 +556,7 @@ err2:
                /* please ignore kernel warning. otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.req,
                            sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
@@ -564,7 +564,7 @@ err2:
 
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.rsp,
                            sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
 {
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                       sizeof(struct ct_sns_pkt),
+                       sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                        sp->u.iocb_cmd.u.ctarg.req,
                        sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                       sizeof(struct ct_sns_pkt),
+                       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                        sp->u.iocb_cmd.u.ctarg.rsp,
                        sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
                /* please ignore kernel warning. otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                               sizeof(struct ct_sns_pkt),
+                               sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                                sp->u.iocb_cmd.u.ctarg.req,
                                sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
                }
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                               sizeof(struct ct_sns_pkt),
+                               sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                                sp->u.iocb_cmd.u.ctarg.rsp,
                                sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
                GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "Failed to allocate ct_sns request.\n");
@@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
                GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "Failed to allocate ct_sns request.\n");
@@ -3698,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        return rval;
 
 done_free_sp:
+       spin_lock_irqsave(&vha->hw->vport_slock, flags);
+       list_del(&sp->elem);
+       spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
                        sizeof(struct ct_sns_pkt),
@@ -4142,14 +4156,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                         */
                        if (sp->u.iocb_cmd.u.ctarg.req) {
                                dma_free_coherent(&vha->hw->pdev->dev,
-                                   sizeof(struct ct_sns_pkt),
+                                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                                    sp->u.iocb_cmd.u.ctarg.req,
                                    sp->u.iocb_cmd.u.ctarg.req_dma);
                                sp->u.iocb_cmd.u.ctarg.req = NULL;
                        }
                        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                                dma_free_coherent(&vha->hw->pdev->dev,
-                                   sizeof(struct ct_sns_pkt),
+                                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                                    sp->u.iocb_cmd.u.ctarg.rsp,
                                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4179,14 +4193,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                /* please ignore kernel warning. Otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.req,
                            sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
                }
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.rsp,
                            sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4281,14 +4295,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
 done_free_sp:
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.req,
                    sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.rsp,
                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4349,6 +4363,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
                sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
                        &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
                        &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+               sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
                if (!sp->u.iocb_cmd.u.ctarg.req) {
                        ql_log(ql_log_warn, vha, 0xffff,
                            "Failed to allocate ct_sns request.\n");
@@ -4366,6 +4381,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
                sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
                        &vha->hw->pdev->dev, rspsz,
                        &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+               sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
                if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                        ql_log(ql_log_warn, vha, 0xffff,
                            "Failed to allocate ct_sns request.\n");
@@ -4425,14 +4441,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
 done_free_sp:
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.req,
                    sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.rsp,
                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
index 7b675243bd16c61a703cffa69c0f5f0a55a62ea6..1b19b954bbae723be04d1ed80b1036bdd94436c6 100644 (file)
@@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                conflict_fcport =
                                        qla2x00_find_fcport_by_wwpn(vha,
                                            e->port_name, 0);
-                               ql_dbg(ql_dbg_disc, vha, 0x20e6,
-                                   "%s %d %8phC post del sess\n",
-                                   __func__, __LINE__,
-                                   conflict_fcport->port_name);
-                               qlt_schedule_sess_for_deletion
-                                       (conflict_fcport);
+                               if (conflict_fcport) {
+                                       qlt_schedule_sess_for_deletion
+                                               (conflict_fcport);
+                                       ql_dbg(ql_dbg_disc, vha, 0x20e6,
+                                           "%s %d %8phC post del sess\n",
+                                           __func__, __LINE__,
+                                           conflict_fcport->port_name);
+                               }
                        }
 
                        /* FW already picked this loop id for another fcport */
@@ -1487,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
-       rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       rval = tm_iocb->u.tmf.data;
 
-       if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-               ql_dbg(ql_dbg_taskm, vha, 0x8030,
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x8030,
                    "TM IOCB failed (%x).\n", rval);
        }
 
index 37ae0f6d8ae5743a182cc9fb963cbc868ddd925b..59fd5a9dfeb8703f437685fa7b4221419b42319e 100644 (file)
@@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
        sp->fcport = fcport;
        sp->iocbs = 1;
        sp->vha = qpair->vha;
+       INIT_LIST_HEAD(&sp->elem);
+
 done:
        if (!sp)
                QLA_QPAIR_MARK_NOT_BUSY(qpair);
index a91cca52b5d5be5a262270ae6b0cbefe5a72a8bf..dd93a22fe84368fc827441983bcb64a0807be3ce 100644 (file)
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
        req_cnt = 1;
        handle = 0;
 
-       if (!sp)
-               goto skip_cmd_array;
-
-       /* Check for room in outstanding command list. */
-       handle = req->current_outstanding_cmd;
-       for (index = 1; index < req->num_outstanding_cmds; index++) {
-               handle++;
-               if (handle == req->num_outstanding_cmds)
-                       handle = 1;
-               if (!req->outstanding_cmds[handle])
-                       break;
-       }
-       if (index == req->num_outstanding_cmds) {
-               ql_log(ql_log_warn, vha, 0x700b,
-                   "No room on outstanding cmd array.\n");
-               goto queuing_error;
-       }
-
-       /* Prep command array. */
-       req->current_outstanding_cmd = handle;
-       req->outstanding_cmds[handle] = sp;
-       sp->handle = handle;
-
-       /* Adjust entry-counts as needed. */
-       if (sp->type != SRB_SCSI_CMD)
+       if (sp && (sp->type != SRB_SCSI_CMD)) {
+               /* Adjust entry-counts as needed. */
                req_cnt = sp->iocbs;
+       }
 
-skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
                if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ skip_cmd_array:
        if (req->cnt < req_cnt + 2)
                goto queuing_error;
 
+       if (sp) {
+               /* Check for room in outstanding command list. */
+               handle = req->current_outstanding_cmd;
+               for (index = 1; index < req->num_outstanding_cmds; index++) {
+                       handle++;
+                       if (handle == req->num_outstanding_cmds)
+                               handle = 1;
+                       if (!req->outstanding_cmds[handle])
+                               break;
+               }
+               if (index == req->num_outstanding_cmds) {
+                       ql_log(ql_log_warn, vha, 0x700b,
+                           "No room on outstanding cmd array.\n");
+                       goto queuing_error;
+               }
+
+               /* Prep command array. */
+               req->current_outstanding_cmd = handle;
+               req->outstanding_cmds[handle] = sp;
+               sp->handle = handle;
+       }
+
        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ skip_cmd_array:
                pkt->handle = handle;
        }
 
+       return pkt;
+
 queuing_error:
        qpair->tgt_counters.num_alloc_iocb_failed++;
        return pkt;
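
The restructuring above defers the outstanding-command slot reservation until after the request-ring space check has passed, so the queuing_error exit can no longer leave a handle assigned to a request that was never queued (and the explicit return pkt keeps the success path out of the error label). The reserve-last shape in a generic stand-alone sketch:

#include <stdio.h>

#define SLOTS 4

static void *slots[SLOTS];      /* stands in for req->outstanding_cmds */

/* Reserve a slot only after every other precondition has passed. */
static int submit(void *cmd, int ring_space)
{
        int i;

        if (ring_space < 1)
                return -1;      /* fail before touching the slot table */

        for (i = 0; i < SLOTS; i++) {
                if (!slots[i]) {
                        slots[i] = cmd; /* reservation is the last step */
                        return i;
                }
        }
        return -1;              /* table full; nothing to unwind */
}

int main(void)
{
        int dummy;

        printf("%d\n", submit(&dummy, 0));      /* fails cleanly, no slot leaked */
        printf("%d\n", submit(&dummy, 8));      /* succeeds, slot 0 */
        return 0;
}
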
index 9fa5a2557f2c7cc475430ed363ef3c7775768ef8..7756106d45554956d32522801e42ccf5e0ad71d3 100644 (file)
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
        unsigned long   flags;
        fc_port_t       *fcport = NULL;
 
+       if (!vha->hw->flags.fw_started)
+               return;
+
        /* Setup to process RIO completion. */
        handle_cnt = 0;
        if (IS_CNA_CAPABLE(ha))
index 7e875f5752299bdb6af02e542ba90362a399a5c7..f0ec13d48bf34077db50a560bc70d448627c8c3b 100644 (file)
@@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
            "Entered %s.\n", __func__);
 
@@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
            "Entered %s.\n", __func__);
 
index f6f0a759a7c248eb403873c01fc9d4d912cced60..aa727d07b702e9ab1e54e7c0fef5df8cf619f767 100644 (file)
@@ -152,11 +152,18 @@ int
 qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
        unsigned long flags;
-       int ret;
+       int ret = QLA_SUCCESS;
+       fc_port_t *fcport;
+
+       if (vha->hw->flags.fw_started)
+               ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
-       ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+       list_for_each_entry(fcport, &vha->vp_fcports, list)
+               fcport->logout_on_delete = 0;
+
+       qla2x00_mark_all_devices_lost(vha, 0);
 
        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
index e881fce7477a90956a4d45b856e484d89821b9e4..1fbd16c8c9a7b1db0e12334468dc177625e57871 100644 (file)
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
+
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
  * it has dependency on UNLOADING flag to stop device discovery
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
        qla2x00_mark_all_devices_lost(vha, 0);
@@ -3180,6 +3181,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
+       ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
        if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
                    "Failed to initialize adapter - Adapter flags %x.\n",
@@ -3216,8 +3219,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-       ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
-
        if (ha->mqenable) {
                bool mq = false;
                bool startit = false;
@@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
+       ql_log(ql_log_info, base_vha, 0xb079,
+           "Removing driver\n");
 
        /* Indicate device removal to prevent future board_disable and wait
         * until any pending board_disable has completed. */
@@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+               if (ha->flags.fw_started)
+                       qla2x00_abort_isp_cleanup(base_vha);
+       } else if (!IS_QLAFX00(ha)) {
+               if (IS_QLA8031(ha)) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+                           "Clearing fcoe driver presence.\n");
+                       if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+                               ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+                                   "Error while clearing DRV-Presence.\n");
+               }
+
+               qla2x00_try_to_stop_firmware(base_vha);
+       }
+
        qla2x00_wait_for_sess_deletion(base_vha);
 
        /*
@@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_delete_all_vps(ha, base_vha);
 
-       if (IS_QLA8031(ha)) {
-               ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
-                   "Clearing fcoe driver presence.\n");
-               if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
-                       ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
-                           "Error while clearing DRV-Presence.\n");
-       }
-
        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
 
        qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                qla2x00_stop_timer(vha);
 
        qla25xx_delete_queues(vha);
-
-       if (ha->flags.fce_enabled)
-               qla2x00_disable_fce_trace(vha, NULL, NULL);
-
-       if (ha->eft)
-               qla2x00_disable_eft_trace(vha);
-
-       if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha)) {
-               if (ha->flags.fw_started)
-                       qla2x00_abort_isp_cleanup(vha);
-       } else {
-               if (ha->flags.fw_started) {
-                       /* Stop currently executing firmware. */
-                       qla2x00_try_to_stop_firmware(vha);
-                       ha->flags.fw_started = 0;
-               }
-       }
-
        vha->flags.online = 0;
 
        /* turn-off interrupts on the card */
@@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data)
                                set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
                }
 
-               if (test_and_clear_bit(ISP_ABORT_NEEDED,
-                                               &base_vha->dpc_flags)) {
+               if (test_and_clear_bit(ISP_ABORT_NEEDED,
+                                      &base_vha->dpc_flags) &&
+                   !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
                            "ISP abort scheduled.\n");
index 04458eb19d380f3f26e4c1389639fb969b607fb5..4499c787165f14f63f9190800886b39374054352 100644 (file)
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
        if (IS_P3P_TYPE(ha))
                return QLA_SUCCESS;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ha->beacon_blink_led = 0;
 
        if (IS_QLA2031(ha) || IS_QLA27XX(ha))
index 0fea2e2326becbf4993dd7cc216e36dad529d678..1027b0cb7fa3634baf0bd870ffdc93e9286cac8e 100644 (file)
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
-       struct qla_hw_data *ha = sess->vha->hw;
        unsigned long flags;
 
        if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
                        return;
        }
 
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;
 
+       spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+               spin_unlock_irqrestore(&sess->vha->work_lock, flags);
                return;
        }
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
        sess->disc_state = DSC_DELETE_PEND;
 
index 24d7496cd9e23cfc2a97126fc22b5f4c25a253b0..364e71861bfd5c2c17caf1e93cdeae669e95b971 100644 (file)
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
        int k = sdebug_add_host;
 
        stop_all_queued();
-       free_all_queued();
        for (; k; k--)
                sdebug_remove_adapter();
+       free_all_queued();
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
index 8932ae81a15a7c36bf7e7800cada555d87a6bd26..2715cdaa669c0802bfb224157e33803e5d0310aa 100644 (file)
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
                rtn = host->hostt->eh_timed_out(scmd);
 
        if (rtn == BLK_EH_DONE) {
+               /*
+                * For blk-mq, we must set the request state to complete now
+                * before sending the request to the scsi error handler. This
+                * will prevent a use-after-free in the event the LLD manages
+                * to complete the request before the error handler finishes
+                * processing this timed out request.
+                *
+                * If the request was already completed, then the LLD beat the
+                * time out handler from transferring the request to the scsi
+                * error handler. In that case we can return immediately as no
+                * further action is required.
+                */
+               if (req->q->mq_ops && !blk_mq_mark_complete(req))
+                       return rtn;
                if (scsi_abort_command(scmd) != SUCCESS) {
                        set_host_byte(scmd, DID_TIME_OUT);
                        scsi_eh_scmd_add(scmd);
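blk_mq_mark_complete() here acts as an atomic claim on the request. Going by
the comment above (a sketch, not the block layer source), its effect is a
compare-and-swap on the request state, so exactly one of the timeout handler
and the LLD completion wins:

	/* hedged sketch of the state transition being relied on */
	static bool mark_complete(struct request *rq)
	{
		return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE)
				== MQ_RQ_IN_FLIGHT;
	}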
index 1da3d71e9f61f784e8131093bd5378d94bb98745..13948102ca298cf1a20d45d49781aa4dee55d851 100644 (file)
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
 
        /* the blk_end_sync_io() doesn't check the error */
        if (inflight)
-               blk_mq_complete_request(req);
+               __blk_complete_request(req);
        return BLK_EH_DONE;
 }
 
index a14fef11776ec846c482178ee555c8122a598d22..2bf3bf73886e373ab573cf6de16ee4a1802c288d 100644 (file)
@@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
  * Check that all zones of the device are equal. The last zone can however
  * be smaller. The zone size must also be a power of two number of LBAs.
  *
- * Returns the zone size in bytes upon success or an error code upon failure.
+ * Returns the zone size in number of blocks upon success or an error code
+ * upon failure.
  */
 static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 {
@@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
        unsigned char *rec;
        unsigned int buf_len;
        unsigned int list_length;
-       int ret;
+       s64 ret;
        u8 same;
 
        /* Get a buffer */
index 53ae52dbff84afd2021e80b7c1329cb7c53117c2..ba9ba0e04f42587476a3d0472104aca342e4f4ef 100644 (file)
@@ -51,6 +51,7 @@ static int sg_version_num = 30536;    /* 2 digits for each component */
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
 #include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
        sdev_prefix_printk(prefix, (sdp)->device,               \
                           (sdp)->disk->disk_name, fmt, ##a)
 
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+       if (filp->f_cred != current_real_cred()) {
+               pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EPERM;
+       }
+       if (uaccess_kernel()) {
+               pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EACCES;
+       }
+       return 0;
+}
+
 static int sg_allow_access(struct file *filp, unsigned char *cmd)
 {
        struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
        struct sg_header *old_hdr = NULL;
        int retval = 0;
 
+       /*
+        * A failed check here can strand a queued response; the caller should
+        * close the file descriptor to free any resources still being held.
+        */
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
+
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
        struct sg_header old_hdr;
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
+       int retval;
 
-       if (unlikely(uaccess_kernel()))
-               return -EINVAL;
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
@@ -1703,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
         *
         * With scsi-mq enabled, there are a fixed number of preallocated
         * requests equal in number to shost->can_queue.  If all of the
-        * preallocated requests are already in use, then using GFP_ATOMIC with
-        * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
-        * will cause blk_get_request() to sleep until an active command
-        * completes, freeing up a request.  Neither option is ideal, but
-        * GFP_KERNEL is the better choice to prevent userspace from getting an
-        * unexpected EWOULDBLOCK.
-        *
-        * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
-        * does not sleep except under memory pressure.
+        * preallocated requests are already in use, then blk_get_request()
+        * will sleep until an active command completes, freeing up a request.
+        * Although waiting in an asynchronous interface is less than ideal, we
+        * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
+        * not expect an EWOULDBLOCK from this condition.
         */
        rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
                        REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
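For contrast, a sketch of the rejected alternative the comment describes:
passing BLK_MQ_REQ_NOWAIT makes blk_get_request() fail fast instead of
sleeping, which would surface -EWOULDBLOCK to userspace under load:

	rq = blk_get_request(q, REQ_OP_SCSI_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* may be -EWOULDBLOCK while all
					 * preallocated requests are in use */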
@@ -2147,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp)
        write_lock_irqsave(&sdp->sfd_lock, iflags);
        if (atomic_read(&sdp->detaching)) {
                write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+               kfree(sfp);
                return ERR_PTR(-ENODEV);
        }
        list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
index 3f3cb72e0c0cdab6a76ea8c4057229f76924899c..d0389b20574d0f778e2bfd95b07e80458970dbd5 100644 (file)
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
        struct scsi_cd *cd;
+       struct scsi_device *sdev;
        int ret = -ENXIO;
 
+       cd = scsi_cd_get(bdev->bd_disk);
+       if (!cd)
+               goto out;
+
+       sdev = cd->device;
+       scsi_autopm_get_device(sdev);
        check_disk_change(bdev);
 
        mutex_lock(&sr_mutex);
-       cd = scsi_cd_get(bdev->bd_disk);
-       if (cd) {
-               ret = cdrom_open(&cd->cdi, bdev, mode);
-               if (ret)
-                       scsi_cd_put(cd);
-       }
+       ret = cdrom_open(&cd->cdi, bdev, mode);
        mutex_unlock(&sr_mutex);
+
+       scsi_autopm_put_device(sdev);
+       if (ret)
+               scsi_cd_put(cd);
+
+out:
        return ret;
 }
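The reordered open path, reduced to its skeleton (mutex elided): take the
device reference first, bracket the media check with runtime-PM get/put, and
drop the reference only on failure:

	cd = scsi_cd_get(bdev->bd_disk);
	if (!cd)
		return -ENXIO;

	scsi_autopm_get_device(cd->device);	/* keep the drive awake */
	check_disk_change(bdev);
	ret = cdrom_open(&cd->cdi, bdev, mode);
	scsi_autopm_put_device(cd->device);

	if (ret)
		scsi_cd_put(cd);		/* undo the reference */
	return ret;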
 
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        if (ret)
                goto out;
 
+       scsi_autopm_get_device(sdev);
+
        /*
         * Send SCSI addressing ioctls directly to mid level, send other
         * ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
                ret = scsi_ioctl(sdev, cmd, argp);
-               goto out;
+               goto put;
        }
 
        ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
        if (ret != -ENOSYS)
-               goto out;
+               goto put;
 
        ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+       scsi_autopm_put_device(sdev);
+
 out:
        mutex_unlock(&sr_mutex);
        return ret;
index 777e5f1e52d10968d5f23e0e316db05b8209511d..0cd947f78b5bfdfa013ce4909afdc8f894b6146b 100644 (file)
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
            (btstat == BTSTAT_SUCCESS ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
-               cmd->result = (DID_OK << 16) | sdstat;
-               if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
-                       cmd->result |= (DRIVER_SENSE << 24);
+               if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+                       cmd->result = (DID_RESET << 16);
+               } else {
+                       cmd->result = (DID_OK << 16) | sdstat;
+                       if (sdstat == SAM_STAT_CHECK_CONDITION &&
+                           cmd->sense_buffer)
+                               cmd->result |= (DRIVER_SENSE << 24);
+               }
        } else
                switch (btstat) {
                case BTSTAT_SUCCESS:
index 36f59a1be7e9a60be61c2b1ba8f7468dfbd8c6c9..61389bdc7926690100fc0a38fc59e8b6a73853ab 100644 (file)
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
 static int scsifront_sdev_configure(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
+               if (err) {
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+                       return err;
+               }
+       }
 
        return 0;
 }
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
 static void scsifront_sdev_destroy(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+       }
 }
 
 static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 
                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
-                       if (device_state == XenbusStateConnected)
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                       if (device_state == XenbusStateConnected) {
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
+                       }
                        break;
                default:
                        break;
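The three hunks above repeat the same write-state-and-report pattern; it could
be factored into a small helper along these lines (hypothetical, not part of
the driver):

	static void vscsifrnt_write_state(struct xenbus_device *dev,
					  const char *path, int state)
	{
		int err = xenbus_printf(XBT_NIL, dev->nodename, path,
					"%d", state);

		if (err)
			xenbus_dev_error(dev, err, "writing %s", path);
	}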
index 32f0748fd0678fedc520b08b807590981d007154..0097a939487fd30cc29e73424656dc2f912cc099 100644 (file)
 #define GPC_PGC_SW2ISO_SHIFT   0x8
 #define GPC_PGC_SW_SHIFT       0x0
 
+#define GPC_PGC_PCI_PDN                0x200
+#define GPC_PGC_PCI_SR         0x20c
+
 #define GPC_PGC_GPU_PDN                0x260
 #define GPC_PGC_GPU_PUPSCR     0x264
 #define GPC_PGC_GPU_PDNSCR     0x268
+#define GPC_PGC_GPU_SR         0x26c
+
+#define GPC_PGC_DISP_PDN       0x240
+#define GPC_PGC_DISP_SR                0x24c
 
 #define GPU_VPU_PUP_REQ                BIT(1)
 #define GPU_VPU_PDN_REQ                BIT(0)
@@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
        { }
 };
 
+static const struct regmap_range yes_ranges[] = {
+       regmap_reg_range(GPC_CNTR, GPC_CNTR),
+       regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+       regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+       regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+       .yes_ranges     = yes_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(yes_ranges),
+};
+
 static const struct regmap_config imx_gpc_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
+       .rd_table = &access_table,
+       .wr_table = &access_table,
        .max_register = 0x2ac,
 };
 
index f4e3bd40c72e60c0448c98456f7b53f6be7936bd..6ef18cf8f24387e324cf455ae98c30f2b27c95d3 100644 (file)
 
 #define GPC_M4_PU_PDN_FLG              0x1bc
 
-
-#define PGC_MIPI                       4
-#define PGC_PCIE                       5
-#define PGC_USB_HSIC                   8
+/*
+ * The PGC offset values given in the Reference Manual's GPC chapter
+ * (GPC_PGC memory map; Rev. 1, 01/2018 and the older revisions) are
+ * incorrect. The offset values below are taken from the design RTL
+ * instead.
+ */
+#define PGC_MIPI                       16
+#define PGC_PCIE                       17
+#define PGC_USB_HSIC                   20
 #define GPC_PGC_CTRL(n)                        (0x800 + (n) * 0x40)
 #define GPC_PGC_SR(n)                  (GPC_PGC_CTRL(n) + 0xc)
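A worked check of the corrected indices against the GPC_PGC_CTRL() formula
(compile-time only, not driver code):

	_Static_assert(GPC_PGC_CTRL(16) == 0xc00, "PGC_MIPI ctrl");
	_Static_assert(GPC_PGC_CTRL(17) == 0xc40, "PGC_PCIE ctrl");
	_Static_assert(GPC_PGC_CTRL(20) == 0xd00, "PGC_USB_HSIC ctrl");

The manual's old value of 4 for PGC_MIPI would have landed on
0x800 + 4 * 0x40 = 0x900, a different block entirely.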
 
index 9dc02f390ba314bf8cfbb1b86223af1859af2507..5856e792d09c8d317b01627c2d03a97eeaebff37 100644 (file)
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
 
 config QCOM_COMMAND_DB
        bool "Qualcomm Command DB"
-       depends on (ARCH_QCOM && OF) || COMPILE_TEST
+       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF_RESERVED_MEM
        help
          Command DB queries shared memory by key string for shared system
          resources. Platform drivers that require to set state of a shared
index 95120acc4d806da630f49737cca1eede3286edae..50d03d8b4f9a55f50d52f328039afe0c27991740 100644 (file)
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
 
 static bool has_cpg_mstp;
 
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 {
        struct generic_pm_domain *genpd = &pd->genpd;
        const char *name = pd->genpd.name;
        struct dev_power_governor *gov = &simple_qos_governor;
+       int error;
 
        if (pd->flags & PD_CPU) {
                /*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
        rcar_sysc_power_up(&pd->ch);
 
 finalize:
-       pm_genpd_init(genpd, gov, false);
+       error = pm_genpd_init(genpd, gov, false);
+       if (error)
+               pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+       return error;
 }
 
 static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
        pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
        iowrite32(syscier, base + SYSCIER);
 
+       /*
+        * First, create all PM domains
+        */
        for (i = 0; i < info->num_areas; i++) {
                const struct rcar_sysc_area *area = &info->areas[i];
                struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
                pd->ch.isr_bit = area->isr_bit;
                pd->flags = area->flags;
 
-               rcar_sysc_pd_setup(pd);
-               if (area->parent >= 0)
-                       pm_genpd_add_subdomain(domains->domains[area->parent],
-                                              &pd->genpd);
+               error = rcar_sysc_pd_setup(pd);
+               if (error)
+                       goto out_put;
 
                domains->domains[area->isr_bit] = &pd->genpd;
        }
 
+       /*
+        * Second, link all PM domains to their parents
+        */
+       for (i = 0; i < info->num_areas; i++) {
+               const struct rcar_sysc_area *area = &info->areas[i];
+
+               if (!area->name || area->parent < 0)
+                       continue;
+
+               error = pm_genpd_add_subdomain(domains->domains[area->parent],
+                                              domains->domains[area->isr_bit]);
+               if (error)
+                       pr_warn("Failed to add PM subdomain %s to parent %u\n",
+                               area->name, area->parent);
+       }
+
        error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
 
 out_put:
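The rework splits initialisation into two passes so a parent that appears later
in info->areas is guaranteed to exist before anything links to it. The shape,
with error handling trimmed and init_pd() as a hypothetical stand-in for the
setup above:

	for (i = 0; i < info->num_areas; i++)		/* pass 1: create */
		domains->domains[areas[i].isr_bit] = init_pd(&areas[i]);

	for (i = 0; i < info->num_areas; i++)		/* pass 2: link */
		if (areas[i].parent >= 0)
			pm_genpd_add_subdomain(
				domains->domains[areas[i].parent],
				domains->domains[areas[i].isr_bit]);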
index a1a0025b59e0e020b7e86cce894723254f325725..d5d33e12e9529288876dd56950f751116cf6dd98 100644 (file)
@@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
                        fput(asma->file);
                        goto out;
                }
+       } else {
+               vma_set_anonymous(vma);
        }
 
        if (vma->vm_file)
index e8c4403297082898c54d0aacf09c778a399aa2d0..31db510018a9462ead01b016f02c4952c6871c96 100644 (file)
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        struct page **tmp = pages;
 
        if (!pages)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
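Returning ERR_PTR(-ENOMEM) instead of NULL lets callers that already follow the
IS_ERR() convention propagate the real errno. The expected caller shape:

	void *vaddr = ion_heap_map_kernel(heap, buffer);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* -ENOMEM from the failed mapping */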
index ea194aa01a642e0c691c9cb8b78e9d8ef43cfa79..257b0daff01f21317cf1e8f1a251efdbd9dc1189 100644 (file)
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
        /* Make sure D/A update mode is direct update */
        outb(0, dev->iobase + DAQP_AUX_REG);
 
-       for (i = 0; i > insn->n; i++) {
+       for (i = 0; i < insn->n; i++) {
                unsigned int val = data[i];
                int ret;
 
index 0ecffab52ec28f0faeaf84624db51774a3080419..abdaf7cf816269fb9063928607580ee38b114fef 100644 (file)
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
        memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
 
        if (dev->flags & IFF_PROMISC) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_PROMISC);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_PROMISC);
                goto spin_unlock;
        }
 
        if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
            (dev->flags & IFF_ALLMULTI)) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCASTALL);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCASTALL);
                goto spin_unlock;
        }
 
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
                                               ETH_ALEN * mc_count);
        } else {
                priv->sme_i.sme_flag |= SME_MULTICAST;
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCAST);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCAST);
        }
 
 spin_unlock:
index a3a83424a926c793c99cdc3ee54f0f61b2b679cd..16478fe9e3f8ad3e07cdfbc84db734d4834f6530 100644 (file)
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
 
index 673fdce2553070ab5cf28182edf2adcfe01e110d..ff7832798a7730c0932853df571b98ded8e054b0 100644 (file)
@@ -7,7 +7,6 @@ config R8188EU
        select LIB80211
        select LIB80211_CRYPT_WEP
        select LIB80211_CRYPT_CCMP
-       select LIB80211_CRYPT_TKIP
        ---help---
        This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
        If built as a module, it will be called r8188eu.
index 05936a45eb93dc2ba8ca79ce1ba0278d3f8c25db..c6857a5be12aaab6968a67ac93e71b1a4a77cc93 100644 (file)
@@ -23,7 +23,6 @@
 #include <mon.h>
 #include <wifi.h>
 #include <linux/vmalloc.h>
-#include <net/lib80211.h>
 
 #define ETHERNET_HEADER_SIZE   14      /*  Ethernet Header Length */
 #define LLC_HEADER_SIZE                        6       /*  LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
 static int recvframe_chkmic(struct adapter *adapter,
                            struct recv_frame *precvframe)
 {
-       int res = _SUCCESS;
-       struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
-       struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+       int     i, res = _SUCCESS;
+       u32     datalen;
+       u8      miccode[8];
+       u8      bmic_err = false, brpt_micerror = true;
+       u8      *pframe, *payload, *pframemic;
+       u8      *mickey;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib   *prxattrib = &precvframe->attrib;
+       struct  security_priv   *psecuritypriv = &adapter->securitypriv;
+
+       struct mlme_ext_priv    *pmlmeext = &adapter->mlmeextpriv;
+       struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+       stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
 
        if (prxattrib->encrypt == _TKIP_) {
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                         __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                         prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+               /* calculate mic code */
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       struct sk_buff *skb = precvframe->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       void *crypto_private = NULL;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &adapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv) {
                                        res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
                                        DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                                        ("\n %s: bcmc key\n", __func__));
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               mickey = &stainfo->dot11tkiprxmickey.skey[0];
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n %s: unicast key\n", __func__));
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       /* icv_len included the mic code */
+                       datalen = precvframe->pkt->len-prxattrib->hdrlen -
+                                 prxattrib->iv_len-prxattrib->icv_len-8;
+                       pframe = precvframe->pkt->data;
+                       payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
-                       memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+                       RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+                       rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+                                          (unsigned char)prxattrib->priority); /* mind the data length */
 
-                       skb_pull(skb, iv_len);
-                       skb_trim(skb, skb->len - icv_len);
+                       pframemic = payload+datalen;
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
+                       bmic_err = false;
+
+                       for (i = 0; i < 8; i++) {
+                               if (miccode[i] != *(pframemic+i)) {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+                                                 __func__, i, miccode[i], i, *(pframemic + i)));
+                                       bmic_err = true;
+                               }
                        }
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       if (bmic_err) {
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-8), *(pframemic-7), *(pframemic-6),
+                                        *(pframemic-5), *(pframemic-4), *(pframemic-3),
+                                        *(pframemic-2), *(pframemic-1)));
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-16), *(pframemic-15), *(pframemic-14),
+                                        *(pframemic-13), *(pframemic-12), *(pframemic-11),
+                                        *(pframemic-10), *(pframemic-9)));
+                               {
+                                       uint i;
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("\n ======dump packet (len=%d)======\n",
+                                                precvframe->pkt->len));
+                                       for (i = 0; i < precvframe->pkt->len; i += 8) {
+                                               RT_TRACE(_module_rtl871x_recv_c_,
+                                                        _drv_err_,
+                                                        ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+                                                        *(precvframe->pkt->data+i),
+                                                        *(precvframe->pkt->data+i+1),
+                                                        *(precvframe->pkt->data+i+2),
+                                                        *(precvframe->pkt->data+i+3),
+                                                        *(precvframe->pkt->data+i+4),
+                                                        *(precvframe->pkt->data+i+5),
+                                                        *(precvframe->pkt->data+i+6),
+                                                        *(precvframe->pkt->data+i+7)));
+                                       }
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n ====== dump packet end [len=%d]======\n",
+                                                precvframe->pkt->len));
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n hdrlen=%d,\n",
+                                                prxattrib->hdrlen));
+                               }
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+                                        prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                                        prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+                               /* Double-check key_index because of a timing issue; */
+                               /* comparing with psecuritypriv->dot118021XGrpKeyid hits the same issue. */
+                               if (IS_MCAST(prxattrib->ra) && (prxattrib->key_index != pmlmeinfo->key_index))
+                                       brpt_micerror = false;
+
+                               if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+                                       rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               } else {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               }
+                               res = _FAIL;
+                       } else {
+                               /* mic checked ok */
+                               if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+                                       psecuritypriv->bcheck_grpkey = true;
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+                               }
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
                                 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
                }
+
+               skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
        }
 
 exit:
+
        return res;
 }
 
index bfe0b217e6798070294c5991eaf13d63105877d3..67a2490f055e234b3ea4621fce7ca0903104801c 100644 (file)
@@ -650,71 +650,71 @@ u32       rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
        return res;
 }
 
+/* The hlen doesn't include the IV */
 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
-       struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
-       u32 res = _SUCCESS;
+{      /* exclude ICV */
+       u16 pnl;
+       u32 pnh;
+       u8   rc4key[16];
+       u8   ttkey[16];
+       u8      crc[4];
+       struct arc4context mycontext;
+       int                     length;
+
+       u8      *pframe, *payload, *iv, *prwskey;
+       union pn48 dot11txpn;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib    *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+       struct  security_priv   *psecuritypriv = &padapter->securitypriv;
+       u32             res = _SUCCESS;
+
+
+       pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
 
        /* 4 start to decrypt recvframe */
        if (prxattrib->encrypt == _TKIP_) {
-               struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+               stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       void *crypto_private = NULL;
-                       struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &padapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv->binstallGrpkey) {
                                        res = _FAIL;
                                        DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo != NULL!!!\n", __func__));
+                               prwskey = &stainfo->dot118021x_UncstKey.skey[0];
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       iv = pframe+prxattrib->hdrlen;
+                       payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+                       length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
+                       GET_TKIP_PN(iv, dot11txpn);
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       pnl = (u16)(dot11txpn.val);
+                       pnh = (u32)(dot11txpn.val>>16);
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+                       phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                       /* 4 decrypt payload, including the ICV */
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                       arcfour_init(&mycontext, rc4key, 16);
+                       arcfour_encrypt(&mycontext, payload, payload, length);
+
+                       *((__le32 *)crc) = getcrc32(payload, length-4);
+
+                       if (crc[3] != payload[length-1] ||
+                           crc[2] != payload[length-2] ||
+                           crc[1] != payload[length-3] ||
+                           crc[0] != payload[length-4]) {
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+                                        ("rtw_tkip_decrypt: icv error crc (%4ph)!=payload (%4ph)\n",
+                                        &crc, &payload[length-4]));
+                               res = _FAIL;
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
                        res = _FAIL;
index 45c05527a57a327a7490acfe889581cfe0bb41ba..faf4b4158cfa2c174ea2f68c3829f7933d64c35f 100644 (file)
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
                return _FAIL;
 
 
-       if (len > MAX_IE_SZ)
+       if (len < 0 || len > MAX_IE_SZ)
                return _FAIL;
 
        pbss_network->IELength = len;
index 7947edb239a13b7d7752cf2e2a82f0814424306e..88ba5b2fea6acdb47863ba959f61eb05318f75a7 100644 (file)
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
                return;
 
        pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
-       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
+       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
 
        pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
        pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
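Worked value: with ASPM_L1_LATENCY defined as 7 in the header hunk below, the
write now sets tmp | (7 << 3) = tmp | 0x38, programming bits 5:3 of config
offset 0x70f (the L1 latency field) instead of the old, unrelated
BIT(7) = 0x80.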
index 012fb618840b05e910e6d68fba38e044dfc26ab0..a45f0eb69d3f2fdb1b14df330a5dbf8cb0ddc24d 100644 (file)
@@ -88,6 +88,7 @@
 #define RTL_USB_MAX_RX_COUNT                   100
 #define QBSS_LOAD_SIZE                         5
 #define MAX_WMMELE_LENGTH                      64
+#define ASPM_L1_LATENCY                                7
 
 #define TOTAL_CAM_ENTRY                                32
 
index a61bc41b82d7845f29dda7cd34669a2311833896..947c79532e1004818eea7ef94b101ec93303e2b2 100644 (file)
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        int chars_sent = 0;
        char __user *cp;
        char *init;
+       size_t bytes_per_ch = unicode ? 3 : 1;
        u16 ch;
        int empty;
        unsigned long flags;
        DEFINE_WAIT(wait);
 
+       if (count < bytes_per_ch)
+               return -EINVAL;
+
        spin_lock_irqsave(&speakup_info.spinlock, flags);
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        init = get_initstring();
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
-       while (chars_sent <= count - 3) {
+       while (chars_sent <= count - bytes_per_ch) {
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
index 3aa981fbc8f56c4215344de1d20269cfc1ae9fc3..e45ed08a51668fbe5ba03abc849746471a1a48e1 100644 (file)
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
 
 config TYPEC_RT1711H
        tristate "Richtek RT1711H Type-C chip driver"
+       depends on I2C
        select TYPEC_TCPCI
        help
          Richtek RT1711H Type-C chip driver that works with
index 514986b57c2d60ce19c1074f4d19d65dd550be2e..25eb3891e34b8435fe15b80c5b7eb5e2a7b7c6d6 100644 (file)
@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
        struct iscsi_param *param;
        u32 mrdsl, mbl;
        u32 max_npdu, max_iso_npdu;
+       u32 max_iso_payload;
 
        if (conn->login->leading_connection) {
                param = iscsi_find_param_from_key(MAXBURSTLENGTH,
@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
        mrdsl = conn_ops->MaxRecvDataSegmentLength;
        max_npdu = mbl / mrdsl;
 
-       max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
-                       (ISCSI_HDR_LEN + mrdsl +
+       max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
+
+       max_iso_npdu = max_iso_payload /
+                      (ISCSI_HDR_LEN + mrdsl +
                        cxgbit_digest_len[csk->submode]);
 
        csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
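Worked example of the rounddown() step, with illustrative numbers: assuming a
65535-byte ceiling for CXGBIT_MAX_ISO_PAYLOAD and an emss of 1448 bytes,
rounddown(65535, 1448) = 45 * 1448 = 65160, so the ISO payload is always a
whole number of TCP segments before it is divided into PDUs.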
@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
        if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
                conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
 
+       if (cxgbit_set_digest(csk))
+               return -1;
+
        if (conn->login->leading_connection) {
                param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
                                                  conn->param_list);
@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
                        if (is_t5(cdev->lldi.adapter_type))
                                goto enable_ddp;
                        else
-                               goto enable_digest;
+                               return 0;
                }
 
                if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
@@ -781,10 +787,6 @@ enable_ddp:
                }
        }
 
-enable_digest:
-       if (cxgbit_set_digest(csk))
-               return -1;
-
        return 0;
 }
 
index 01ac306131c1f163c6eb6043651a412e3e71dc76..10db5656fd5dcb8e95769a922223b8e88cf23983 100644 (file)
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
                 * Check for overflow of 8byte PRI READ_KEYS payload and
                 * next reservation key list descriptor.
                 */
-               if ((add_len + 8) > (cmd->data_length - 8))
-                       break;
-
-               put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
-               off += 8;
+               if (off + 8 <= cmd->data_length) {
+                       put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+                       off += 8;
+               }
+               /*
+                * SPC5r17: 6.16.2 READ KEYS service action
+                * The ADDITIONAL LENGTH field indicates the number of bytes in
+                * the Reservation key list. The contents of the ADDITIONAL
+                * LENGTH field are not altered based on the allocation length.
+                */
                add_len += 8;
        }
        spin_unlock(&dev->t10_pr.registration_lock);
index 7f96dfa32b9cdf1cbf167fe1b0581e3b94f1a08b..d8dc3d22051f7810efa5faafba0cc71e3ad43040 100644 (file)
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-                            bool bidi)
+                            bool bidi, uint32_t read_len)
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
-               while (sg_remaining > 0) {
+               while (sg_remaining > 0 && read_len > 0) {
                        if (block_remaining == 0) {
                                if (from)
                                        kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
+                       if (read_len < copy_bytes)
+                               copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
                        tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 
                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
+                       read_len -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
+               if (read_len == 0)
+                       break;
        }
        if (from)
                kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
+       bool read_len_valid = false;
+       uint32_t read_len = se_cmd->data_length;
 
        /*
         * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
-       } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+               goto done;
+       }
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+           (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+               read_len_valid = true;
+               if (entry->rsp.read_len < read_len)
+                       read_len = entry->rsp.read_len;
+       }
+
+       if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
-       } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               if (!read_len_valid)
+                       goto done;
+               else
+                       se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+       }
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                /* Get Data-In buffer before clean up */
-               gather_data_area(udev, cmd, true);
+               gather_data_area(udev, cmd, true, read_len);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               gather_data_area(udev, cmd, false);
+               gather_data_area(udev, cmd, false, read_len);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                /* TODO: */
        } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                        se_cmd->data_direction);
        }
 
-       target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+       if (read_len_valid) {
+               pr_debug("read_len = %u\n", read_len);
+               target_complete_cmd_with_length(cmd->se_cmd,
+                                       entry->rsp.scsi_status, read_len);
+       } else {
+               target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+       }
 
 out:
        cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
        /* Initialise the mailbox of the ring buffer */
        mb = udev->mb_addr;
        mb->version = TCMU_MAILBOX_VERSION;
-       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;
 
index 6281266b8ec0a15721da5196b641b707b8a5c973..a923ebdeb73c80bf845af7acc90dcc20a9c2ce1b 100644 (file)
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                goto err_free_acl;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+       if (!ret) {
+               /* Notify userspace about the change */
+               kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+       }
        mutex_unlock(&tb->lock);
 
 err_free_acl:
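
The new kobject_uevent() call emits a KOBJ_CHANGE ("change") uevent once set_boot_acl() succeeds, so udev and other listeners learn that boot_acl changed underneath them. The generic pattern, sketched for any struct device (sysfs_notify() is the related poll()-based mechanism for a single named attribute):

    #include <linux/device.h>
    #include <linux/kobject.h>

    /* Sketch: tell userspace an attribute changed out from under it. */
    static void notify_attr_change(struct device *dev, const char *attr)
    {
            kobject_uevent(&dev->kobj, KOBJ_CHANGE); /* broadcast uevent */
            sysfs_notify(&dev->kobj, NULL, attr);    /* wake poll()ers */
    }
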
index cbe98bc2b998276fd95b2d8086a6fabcf2351bf7..43174220170924e094567ad6271703bd881e2a6f 100644 (file)
@@ -124,6 +124,8 @@ struct n_tty_data {
        struct mutex output_lock;
 };
 
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
        return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
 
 static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
 {
+       smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
        return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
 }
 
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
 static void reset_buffer_flags(struct n_tty_data *ldata)
 {
        ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
-       ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
        ldata->commit_head = 0;
-       ldata->echo_mark = 0;
        ldata->line_start = 0;
 
        ldata->erasing = 0;
@@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
        old_space = space = tty_write_room(tty);
 
        tail = ldata->echo_tail;
-       while (ldata->echo_commit != tail) {
+       while (MASK(ldata->echo_commit) != MASK(tail)) {
                c = echo_buf(ldata, tail);
                if (c == ECHO_OP_START) {
                        unsigned char op;
                        int no_space_left = 0;
 
+                       /*
+                        * Since add_echo_byte() is called without holding
+                        * output_lock, we might see only a portion of a
+                        * multi-byte operation.
+                        */
+                       if (MASK(ldata->echo_commit) == MASK(tail + 1))
+                               goto not_yet_stored;
                        /*
                         * If the buffer byte is the start of a multi-byte
                         * operation, get the next byte, which is either the
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
                                unsigned int num_chars, num_bs;
 
                        case ECHO_OP_ERASE_TAB:
+                               if (MASK(ldata->echo_commit) == MASK(tail + 2))
+                                       goto not_yet_stored;
                                num_chars = echo_buf(ldata, tail + 2);
 
                                /*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
        /* If the echo buffer is nearly full (so that the possibility exists
         * of echo overrun before the next commit), then discard enough
         * data at the tail to prevent a subsequent overrun */
-       while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+       while (ldata->echo_commit > tail &&
+              ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
                if (echo_buf(ldata, tail) == ECHO_OP_START) {
                        if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
                                tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
                        tail++;
        }
 
+ not_yet_stored:
        ldata->echo_tail = tail;
        return old_space - space;
 }
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
        size_t nr, old, echoed;
        size_t head;
 
+       mutex_lock(&ldata->output_lock);
        head = ldata->echo_head;
        ldata->echo_mark = head;
        old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
         * is over the threshold (and try again each time another
         * block is accumulated) */
        nr = head - ldata->echo_tail;
-       if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+       if (nr < ECHO_COMMIT_WATERMARK ||
+           (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+               mutex_unlock(&ldata->output_lock);
                return;
+       }
 
-       mutex_lock(&ldata->output_lock);
        ldata->echo_commit = head;
        echoed = __process_echoes(tty);
        mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
 
 static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
 {
-       *echo_buf_addr(ldata, ldata->echo_head++) = c;
+       *echo_buf_addr(ldata, ldata->echo_head) = c;
+       smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+       ldata->echo_head++;
 }
 
 /**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
        }
 
        seen_alnums = 0;
-       while (ldata->read_head != ldata->canon_head) {
+       while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
                head = ldata->read_head;
 
                /* erase a single possibly multibyte character */
                do {
                        head--;
                        c = read_buf(ldata, head);
-               } while (is_continuation(c, tty) && head != ldata->canon_head);
+               } while (is_continuation(c, tty) &&
+                        MASK(head) != MASK(ldata->canon_head));
 
                /* do not partially erase */
                if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                 * This info is used to go back the correct
                                 * number of columns.
                                 */
-                               while (tail != ldata->canon_head) {
+                               while (MASK(tail) != MASK(ldata->canon_head)) {
                                        tail--;
                                        c = read_buf(ldata, tail);
                                        if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
                        finish_erasing(ldata);
                        echo_char(c, tty);
                        echo_char_raw('\n', ldata);
-                       while (tail != ldata->read_head) {
+                       while (MASK(tail) != MASK(ldata->read_head)) {
                                echo_char(read_buf(ldata, tail), tty);
                                tail++;
                        }
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
        struct n_tty_data *ldata;
 
        /* Currently a malloc failure here can panic */
-       ldata = vmalloc(sizeof(*ldata));
+       ldata = vzalloc(sizeof(*ldata));
        if (!ldata)
-               goto err;
+               return -ENOMEM;
 
        ldata->overrun_time = jiffies;
        mutex_init(&ldata->atomic_read_lock);
        mutex_init(&ldata->output_lock);
 
        tty->disc_data = ldata;
-       reset_buffer_flags(tty->disc_data);
-       ldata->column = 0;
-       ldata->canon_column = 0;
-       ldata->num_overrun = 0;
-       ldata->no_room = 0;
-       ldata->lnext = 0;
        tty->closing = 0;
        /* indicate buffer work may resume */
        clear_bit(TTY_LDISC_HALTED, &tty->flags);
        n_tty_set_termios(tty, NULL);
        tty_unthrottle(tty);
-
        return 0;
-err:
-       return -ENOMEM;
 }
 
 static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
        tail = ldata->read_tail;
        nr = head - tail;
        /* Skip EOF-chars.. */
-       while (head != tail) {
+       while (MASK(head) != MASK(tail)) {
                if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
                    read_buf(ldata, tail) == __DISABLED_CHAR)
                        nr--;
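
All the n_tty hunks share one theme: the read/echo head and tail counters are free-running size_t values into a power-of-two ring, so equality tests must compare masked indices (the new MASK() macro), and the echo buffer is now published with an smp_wmb()/smp_rmb() pair so a reader can never observe echo_head advanced before the byte behind it is stored. A userspace C11 analogue of the add_echo_byte()/echo_buf() pairing (illustrative only; names mirror the driver):

    #include <stdatomic.h>
    #include <stddef.h>

    #define BUF_SIZE 4096                 /* power of two, like N_TTY_BUF_SIZE */
    #define MASK(x)  ((x) & (BUF_SIZE - 1))

    static unsigned char echo_buf[BUF_SIZE];
    static _Atomic size_t echo_head;      /* written by a single producer */

    static void add_echo_byte(unsigned char c)
    {
            size_t head = atomic_load_explicit(&echo_head,
                                               memory_order_relaxed);

            echo_buf[MASK(head)] = c;
            /* kernel smp_wmb(): store the byte before publishing head */
            atomic_store_explicit(&echo_head, head + 1,
                                  memory_order_release);
    }

    static unsigned char read_echo_byte(size_t i)
    {
            /* kernel smp_rmb(): pairs with the release store above */
            atomic_thread_fence(memory_order_acquire);
            return echo_buf[MASK(i)];
    }
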
index df93b727e984ee3d185fa0f5a42cad09d63a3f65..9e59f4788589c879358ce12507362baec459533d 100644 (file)
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
 static void __exit serdev_exit(void)
 {
        bus_unregister(&serdev_bus_type);
+       ida_destroy(&ctrl_ida);
 }
 module_exit(serdev_exit);
 
index 3296a05cda2db8d53b1d869123ed8e2aa884a248..f80a300b5d68f6e8ad61b7daf2544234da7e1662 100644 (file)
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
        /* multi-io cards handled by parport_serial */
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
-       { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
-       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 
        /* Moxa Smartio MUE boards handled by 8250_moxa */
        { PCI_VDEVICE(MOXA, 0x1024), },
index 1eb1a376a0419d4084cd7a72e1e2c7eb769798f7..15eb6c829d39c5b108adfca034fa50769763c0bc 100644 (file)
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
        if (!*vc->vc_uni_pagedir_loc)
                con_set_default_unimap(vc);
 
-       vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+       vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
        if (!vc->vc_screenbuf)
                goto err_free;
 
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 
        if (new_screen_size > (4 << 20))
                return -EINVAL;
-       newscreen = kmalloc(new_screen_size, GFP_USER);
+       newscreen = kzalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
index e8f4ac9400ea842a8fe631fe23519e04f565f4e6..5d421d7e8904fc633f9f25c0faf6d69ec1d8f498 100644 (file)
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->name);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(name);
 
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->version);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(version);
 
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
 static irqreturn_t uio_interrupt(int irq, void *dev_id)
 {
        struct uio_device *idev = (struct uio_device *)dev_id;
-       irqreturn_t ret = idev->info->handler(irq, idev->info);
+       irqreturn_t ret;
 
+       mutex_lock(&idev->info_lock);
+
+       ret = idev->info->handler(irq, idev->info);
        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);
 
+       mutex_unlock(&idev->info_lock);
        return ret;
 }
 
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;
-       unsigned long flags;
 
        mutex_lock(&minor_lock);
        idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               mutex_unlock(&idev->info_lock);
+               ret = -EINVAL;
+               goto err_alloc_listener;
+       }
+
        if (idev->info && idev->info->open)
                ret = idev->info->open(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        if (ret)
                goto err_infoopen;
 
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (idev->info && idev->info->release)
                ret = idev->info->release(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        module_put(idev->owner);
        kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        __poll_t ret = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                ret = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (ret)
                return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval = 0;
        s32 event_count;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                retval = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (retval)
                return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        struct uio_device *idev = listener->dev;
        ssize_t retval;
        s32 irq_on;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               retval = -EINVAL;
+               goto out;
+       }
+
        if (!idev->info || !idev->info->irq) {
                retval = -EIO;
                goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        retval = idev->info->irqcontrol(idev->info, irq_on);
 
 out:
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        return retval ? retval : sizeof(s32);
 }
 
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
        struct page *page;
        unsigned long offset;
        void *addr;
+       int ret = 0;
+       int mi;
 
-       int mi = uio_find_mem_index(vmf->vma);
-       if (mi < 0)
-               return VM_FAULT_SIGBUS;
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
+
+       mi = uio_find_mem_index(vmf->vma);
+       if (mi < 0) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
 
        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
                page = vmalloc_to_page(addr);
        get_page(page);
        vmf->page = page;
-       return 0;
+
+out:
+       mutex_unlock(&idev->info_lock);
+
+       return ret;
 }
 
 static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        struct uio_mem *mem;
+
        if (mi < 0)
                return -EINVAL;
        mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 
        vma->vm_private_data = idev;
 
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        mi = uio_find_mem_index(vma);
-       if (mi < 0)
-               return -EINVAL;
+       if (mi < 0) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        requested_pages = vma_pages(vma);
        actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
                        + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
-       if (requested_pages > actual_pages)
-               return -EINVAL;
+       if (requested_pages > actual_pages) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        if (idev->info->mmap) {
                ret = idev->info->mmap(idev->info, vma);
-               return ret;
+               goto out;
        }
 
        switch (idev->info->mem[mi].memtype) {
                case UIO_MEM_PHYS:
-                       return uio_mmap_physical(vma);
+                       ret = uio_mmap_physical(vma);
+                       break;
                case UIO_MEM_LOGICAL:
                case UIO_MEM_VIRTUAL:
-                       return uio_mmap_logical(vma);
+                       ret = uio_mmap_logical(vma);
+                       break;
                default:
-                       return -EINVAL;
+                       ret = -EINVAL;
        }
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 
 static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
 
        idev->owner = owner;
        idev->info = info;
-       spin_lock_init(&idev->info_lock);
+       mutex_init(&idev->info_lock);
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);
 
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
                 * FDs at the time of unregister and therefore may not be
                 * freed until they are released.
                 */
-               ret = request_irq(info->irq, uio_interrupt,
-                                 info->irq_flags, info->name, idev);
+               ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
+                                          info->irq_flags, info->name, idev);
+
                if (ret)
                        goto err_request_irq;
        }
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
 void uio_unregister_device(struct uio_info *info)
 {
        struct uio_device *idev;
-       unsigned long flags;
 
        if (!info || !info->uio_dev)
                return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
 
        uio_free_minor(idev);
 
+       mutex_lock(&idev->info_lock);
        uio_dev_del_attributes(idev);
 
        if (info->irq && info->irq != UIO_IRQ_CUSTOM)
                free_irq(info->irq, idev);
 
-       spin_lock_irqsave(&idev->info_lock, flags);
        idev->info = NULL;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        device_unregister(&idev->dev);
 
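
Every uio file operation above now follows the same pattern: idev->info may become NULL the moment uio_unregister_device() runs, so each op takes info_lock and re-checks info before dereferencing it, and the lock had to become a mutex because callbacks such as info->open() may sleep. Condensed into one sketch, with a hypothetical some_op() standing in for any info callback:

    #include <linux/mutex.h>
    #include <linux/uio_driver.h>

    extern int some_op(struct uio_info *info);  /* hypothetical, may sleep */

    /* Sketch: guard a hot-unpluggable resource; mirrors the uio fops. */
    static int guarded_op(struct uio_device *idev)
    {
            int ret;

            mutex_lock(&idev->info_lock);
            if (!idev->info) {              /* already unregistered */
                    ret = -EINVAL;
                    goto out;
            }
            ret = some_op(idev->info);      /* may sleep; a mutex allows it */
    out:
            mutex_unlock(&idev->info_lock);
            return ret;
    }
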
index 785f0ed037f7897d06cd3432a7a79013bba3831c..ee34e9046f7ea201f53e79d3e8725d9c937b0d88 100644 (file)
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
        depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
        select EXTCON
        select RESET_CONTROLLER
+       select USB_ULPI_BUS
        help
          Say Y here if your system has a dual role high speed USB
          controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
        help
          Say Y here to enable host controller functionality of the
          ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
-       bool "ChipIdea ULPI PHY support"
-       depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
-       help
-         Say Y here if you have a ULPI PHY attached to your ChipIdea
-         controller.
-
 endif
index e3d5e728fa530aef709487061b28f76a36f3a98d..12df94f78f7221e39aa24f0646d78d90977088e7 100644 (file)
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB_CHIPIDEA)             += ci_hdrc.o
 
-ci_hdrc-y                              := core.o otg.o debug.o
+ci_hdrc-y                              := core.o otg.o debug.o ulpi.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC)     += udc.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST)    += host.o
 ci_hdrc-$(CONFIG_USB_OTG_FSM)          += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI)    += ulpi.o
 
 # Glue/Bridge layers go here
 
index 0bf244d505442932d7660640029beebb66c2e0e0..6a2cc5cd0281d8d728fb4f083798b2ea6bf55cc8 100644 (file)
@@ -240,10 +240,8 @@ struct ci_hdrc {
 
        struct ci_hdrc_platform_data    *platdata;
        int                             vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
        struct ulpi                     *ulpi;
        struct ulpi_ops                 ulpi_ops;
-#endif
        struct phy                      *phy;
        /* old usb_phy interface */
        struct usb_phy                  *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
 int ci_ulpi_init(struct ci_hdrc *ci);
 void ci_ulpi_exit(struct ci_hdrc *ci);
 int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
 
 u32 hw_read_intr_enable(struct ci_hdrc *ci);
 
index af45aa3222b5ce1c99ccb4de586c988d3b3a0b9f..4638d9b066bea7ad2b35f6c966ee7acec428facf 100644 (file)
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
 
        hcd->power_budget = ci->platdata->power_budget;
        hcd->tpl_support = ci->platdata->tpl_support;
-       if (ci->phy || ci->usb_phy)
+       if (ci->phy || ci->usb_phy) {
                hcd->skip_phy_initialization = 1;
+               if (ci->usb_phy)
+                       hcd->usb_phy = ci->usb_phy;
+       }
 
        ehci = hcd_to_ehci(hcd);
        ehci->caps = ci->hw_bank.cap;
index 6da42dcd2888601a23f65aa6644608c259b3288c..dfec07e8ae1d268c459b495603bd5e8de154518f 100644 (file)
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
 {
        int cnt = 100000;
 
+       if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+               return 0;
+
        while (cnt-- > 0) {
                if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
                        return 0;
index 7b366a6c0b493f2eb8bec4959830d222223f3cb2..75c4623ad779eecd64b0164a24b6d8ac86177ca3 100644 (file)
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
        .driver_info = SINGLE_RX_URB,
        },
+       { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
@@ -1828,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
        .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
        },
+       { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+       },
 
        { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
        .driver_info = CLEAR_HALT_CONDITIONS,
index fcae521df29b8de4712e92de725c575f298f567a..1fb2668099663e08ffa3f5e7713ab0eaa55fb10a 100644 (file)
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
                if (!udev || udev->state == USB_STATE_NOTATTACHED) {
                        /* Tell hub_wq to disconnect the device or
-                        * check for a new connection
+                        * check for a new connection or an over-current condition.
+                        * Based on USB2.0 Spec Section 11.12.5,
+                        * C_PORT_OVER_CURRENT could be set while
+                        * PORT_OVER_CURRENT is not. So check for any of them.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-                           (portstatus & USB_PORT_STAT_OVERCURRENT))
+                           (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+                           (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
 
                } else if (portstatus & USB_PORT_STAT_ENABLE) {
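
Per USB 2.0 section 11.12.5, the latched change bit C_PORT_OVER_CURRENT can remain set after PORT_OVER_CURRENT itself has cleared, so hub_activate() must consider both halves of the GetPortStatus reply. The test in isolation (bit definitions from the ch11 UAPI header):

    #include <linux/types.h>
    #include <linux/usb/ch11.h>

    /* Sketch: does this port need hub_wq for an over-current event? */
    static bool port_overcurrent_event(u16 portstatus, u16 portchange)
    {
            return (portstatus & USB_PORT_STAT_OVERCURRENT) ||
                   (portchange & USB_PORT_STAT_C_OVERCURRENT);
    }
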
index c55def2f1320f92c6c0c652fc94c7056165ee467..097057d2eacf7bcb18316473c6f0def8a5744b62 100644 (file)
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Corsair K70 RGB */
        { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* Corsair Strafe */
+       { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+         USB_QUIRK_DELAY_CTRL_MSG },
+
        /* Corsair Strafe RGB */
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
          USB_QUIRK_DELAY_CTRL_MSG },
index 4a56ac772a3c35360d3834f5542aad22d5b1b720..71b3b08ad516c9fb3bfbd403da1687b5825e7e14 100644 (file)
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
  * @frame_list_sz:      Frame list size
  * @desc_gen_cache:     Kmem cache for generic descriptors
  * @desc_hsisoc_cache:  Kmem cache for hs isochronous descriptors
+ * @unaligned_cache:    Kmem cache for DMA mode to handle non-aligned buffers
  *
  * These are for peripheral mode:
  *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
        u32 frame_list_sz;
        struct kmem_cache *desc_gen_cache;
        struct kmem_cache *desc_hsisoc_cache;
+       struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
 
 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
 
index f0d9ccf1d665ad37b2f23786806bde8c16da11ea..cefc99ae69b2f489c8fa16f9fca4381ffb405bc3 100644 (file)
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
        u32 index;
        u32 maxsize = 0;
        u32 mask = 0;
+       u8 pid = 0;
 
        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                         ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 
        if (hs_ep->dir_in) {
-               desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+               if (len)
+                       pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+               else
+                       pid = 1;
+               desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
                                 DEV_DMA_ISOC_PID_MASK) |
                                ((len % hs_ep->ep.maxpacket) ?
                                 DEV_DMA_SHORT : 0) |
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        struct dwc2_dma_desc *desc;
 
        if (list_empty(&hs_ep->queue)) {
+               hs_ep->target_frame = TARGET_FRAME_INITIAL;
                dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                return;
        }
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
         */
        tmp = dwc2_hsotg_read_frameno(hsotg);
 
-       dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
        if (using_desc_dma(hsotg)) {
                if (ep->target_frame == TARGET_FRAME_INITIAL) {
                        /* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                tmp = dwc2_hsotg_read_frameno(hsotg);
                if (using_desc_dma(hsotg)) {
-                       dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                                   get_ep_head(hs_ep), 0);
-
                        hs_ep->target_frame = tmp;
                        dwc2_gadget_incr_frame_num(hs_ep);
                        dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -3429,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_in[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3475,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_out[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3649,7 +3650,7 @@ irq_retry:
                for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                        hs_ep = hsotg->eps_out[idx];
                        /* Proceed only unmasked ISOC EPs */
-                       if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+                       if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                                continue;
 
                        epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
        }
 
        ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret)
+       if (ret) {
+               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+                                          hsotg->ctrl_req);
                return ret;
-
+       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
        usb_del_gadget_udc(&hsotg->gadget);
+       dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
        return 0;
 }
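
For isochronous IN descriptors the PID field encodes how many packets the service interval carries; programming hs_ep->mc unconditionally was wrong for requests whose length implies fewer packets, and a zero-length request still needs PID = 1. The rule the hunk implements, as a one-liner (DIV_ROUND_UP is the kernel helper):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Sketch: packets-per-interval (PID) for a DDMA isoc IN descriptor. */
    static u8 isoc_in_pid(u32 len, u16 maxpacket)
    {
            return len ? DIV_ROUND_UP(len, maxpacket) : 1;
    }
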
index edaf0b6af4f0491ba192d346c29a792a06a85751..6e2cdd7b93d46cf839fe2c515e88028dec22bb2e 100644 (file)
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
        }
 
        if (hsotg->params.host_dma) {
-               dwc2_writel((u32)chan->xfer_dma,
-                           hsotg->regs + HCDMA(chan->hc_num));
+               dma_addr_t dma_addr;
+
+               if (chan->align_buf) {
+                       if (dbg_hc(chan))
+                               dev_vdbg(hsotg->dev, "align_buf\n");
+                       dma_addr = chan->align_buf;
+               } else {
+                       dma_addr = chan->xfer_dma;
+               }
+               dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
                if (dbg_hc(chan))
                        dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-                                (unsigned long)chan->xfer_dma, chan->hc_num);
+                                (unsigned long)dma_addr, chan->hc_num);
        }
 
        /* Start the split */
@@ -2625,36 +2634,66 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
        }
 }
 
-#define DWC2_USB_DMA_ALIGN 4
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+                                           struct dwc2_qh *qh,
+                                           struct dwc2_host_chan *chan)
+{
+       if (!hsotg->unaligned_cache ||
+           chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+               return -ENOMEM;
 
-struct dma_aligned_buffer {
-       void *kmalloc_ptr;
-       void *old_xfer_buffer;
-       u8 data[0];
-};
+       if (!qh->dw_align_buf) {
+               qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+                                                   GFP_ATOMIC | GFP_DMA);
+               if (!qh->dw_align_buf)
+                       return -ENOMEM;
+       }
+
+       qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+                                             DWC2_KMEM_UNALIGNED_BUF_SIZE,
+                                             DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+               dev_err(hsotg->dev, "can't map align_buf\n");
+               chan->align_buf = 0;
+               return -EINVAL;
+       }
+
+       chan->align_buf = qh->dw_align_buf_dma;
+       return 0;
+}
+
+#define DWC2_USB_DMA_ALIGN 4
 
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-       struct dma_aligned_buffer *temp;
+       void *stored_xfer_buffer;
+       size_t length;
 
        if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
                return;
 
-       temp = container_of(urb->transfer_buffer,
-                           struct dma_aligned_buffer, data);
+       /* Restore urb->transfer_buffer from the end of the allocated area */
+       memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+              urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
-       if (usb_urb_dir_in(urb))
-               memcpy(temp->old_xfer_buffer, temp->data,
-                      urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->old_xfer_buffer;
-       kfree(temp->kmalloc_ptr);
+       if (usb_urb_dir_in(urb)) {
+               if (usb_pipeisoc(urb->pipe))
+                       length = urb->transfer_buffer_length;
+               else
+                       length = urb->actual_length;
+
+               memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+       }
+       kfree(urb->transfer_buffer);
+       urb->transfer_buffer = stored_xfer_buffer;
 
        urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-       struct dma_aligned_buffer *temp, *kmalloc_ptr;
+       void *kmalloc_ptr;
        size_t kmalloc_size;
 
        if (urb->num_sgs || urb->sg ||
@@ -2662,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
            !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
                return 0;
 
-       /* Allocate a buffer with enough padding for alignment */
+       /*
+        * Allocate a buffer with enough padding for the original
+        * transfer_buffer pointer. This allocation is guaranteed to be
+        * aligned properly for DMA.
+        */
        kmalloc_size = urb->transfer_buffer_length +
-               sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+               sizeof(urb->transfer_buffer);
 
        kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
        if (!kmalloc_ptr)
                return -ENOMEM;
 
-       /* Position our struct dma_aligned_buffer such that data is aligned */
-       temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-       temp->kmalloc_ptr = kmalloc_ptr;
-       temp->old_xfer_buffer = urb->transfer_buffer;
+       /*
+        * Store the original urb->transfer_buffer pointer at the end of the
+        * allocation so it can be restored on completion.
+        */
+       memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+              &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
        if (usb_urb_dir_out(urb))
-               memcpy(temp->data, urb->transfer_buffer,
+               memcpy(kmalloc_ptr, urb->transfer_buffer,
                       urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->data;
+       urb->transfer_buffer = kmalloc_ptr;
 
        urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
 
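
The rewritten helpers drop struct dma_aligned_buffer entirely and instead stash the original transfer_buffer pointer in spare bytes past the payload, so the buffer handed to the hardware keeps kmalloc()'s natural alignment. The trick in isolation, as plain runnable userspace C:

    #include <stdlib.h>
    #include <string.h>

    /*
     * Sketch: allocate a bounce buffer of len bytes and hide the original
     * pointer after the payload so it can be recovered later.
     */
    static void *bounce_alloc(void *orig, size_t len)
    {
            void *buf = malloc(len + sizeof(orig));

            if (!buf)
                    return NULL;
            memcpy((char *)buf + len, &orig, sizeof(orig));  /* stash */
            return buf;
    }

    static void *bounce_free(void *buf, size_t len)
    {
            void *orig;

            memcpy(&orig, (char *)buf + len, sizeof(orig));  /* restore */
            free(buf);
            return orig;
    }
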
@@ -2802,6 +2848,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
        /* Set the transfer attributes */
        dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+       /* For non-dword aligned buffers */
+       if (hsotg->params.host_dma && qh->do_split &&
+           chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+               dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+               if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+                       dev_err(hsotg->dev,
+                               "Failed to allocate memory to handle non-aligned buffer\n");
+                       /* Add channel back to free list */
+                       chan->align_buf = 0;
+                       chan->multi_count = 0;
+                       list_add_tail(&chan->hc_list_entry,
+                                     &hsotg->free_hc_list);
+                       qtd->in_process = 0;
+                       qh->channel = NULL;
+                       return -ENOMEM;
+               }
+       } else {
+               /*
+                * We assume that DMA is always aligned in the non-split
+                * case and the split-out case. Warn if not.
+                */
+               WARN_ON_ONCE(hsotg->params.host_dma &&
+                            (chan->xfer_dma & 0x3));
+               chan->align_buf = 0;
+       }
+
        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
            chan->ep_type == USB_ENDPOINT_XFER_ISOC)
                /*
@@ -5246,6 +5318,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
                }
        }
 
+       if (hsotg->params.host_dma) {
+               /*
+                * Create kmem caches to handle non-aligned buffer
+                * in Buffer DMA mode.
+                */
+               hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+                                               DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+                                               SLAB_CACHE_DMA, NULL);
+               if (!hsotg->unaligned_cache)
+                       dev_err(hsotg->dev,
+                               "unable to create dwc2 unaligned cache\n");
+       }
+
        hsotg->otg_port = 1;
        hsotg->frame_list = NULL;
        hsotg->frame_list_dma = 0;
@@ -5280,8 +5365,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        return 0;
 
 error4:
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
        dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5408,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
        usb_remove_hcd(hcd);
        hsotg->priv = NULL;
 
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 
        dwc2_hcd_release(hsotg);
        usb_put_hcd(hcd);
@@ -5435,7 +5522,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
@@ -5616,6 +5703,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
                return ret;
        }
 
+       dwc2_hcd_rem_wakeup(hsotg);
+
        hsotg->hibernated = 0;
        hsotg->bus_suspended = 0;
        hsotg->lx_state = DWC2_L0;
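
The host side gains a DMA-capable slab for split IN transfers whose URB buffer is not DWORD-aligned; the pieces are spread over several hunks above, so here they are condensed into one sketch (error paths trimmed; struct fields and constants are the ones this patch introduces in the driver-local headers):

    /* Sketch: lifetime of one dw_align_buf bounce, end to end. */
    static int bounce_round_trip(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                 void *urb_pos, size_t len)
    {
            qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
                                                GFP_ATOMIC | GFP_DMA);
            if (!qh->dw_align_buf)
                    return -ENOMEM;
            qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
                                                  DWC2_KMEM_UNALIGNED_BUF_SIZE,
                                                  DMA_FROM_DEVICE);
            /* ... hardware DMAs into dw_align_buf_dma ... */
            dma_unmap_single(hsotg->dev, qh->dw_align_buf_dma,
                             DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
            memcpy(urb_pos, qh->dw_align_buf, len);   /* copy back to URB */
            kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
            return 0;
    }

Note also that the error4 and remove paths now destroy the three caches in reverse order of creation, with the new unaligned cache first.
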
index 7db1ee7e7a7781c12100d413abe7e4d62828e951..5502a501f5166640a2926132e4d4e1ee2f450724 100644 (file)
@@ -76,6 +76,8 @@ struct dwc2_qh;
  *                      (micro)frame
  * @xfer_buf:           Pointer to current transfer buffer position
  * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
  * @xfer_len:           Total number of bytes to transfer
  * @xfer_count:         Number of bytes transferred so far
  * @start_pkt_count:    Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
 
        u8 *xfer_buf;
        dma_addr_t xfer_dma;
+       dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
  *                           speed.  Note that this is in "schedule slice" which
  *                           is tightly packed.
  * @ntd:                Actual number of transfer descriptors in a list
+ * @dw_align_buf:       Used instead of the original buffer if its physical
+ *                      address is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
  * @qtd_list:           List of QTDs for this QH
  * @channel:            Host channel currently processing transfers for this QH
  * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
+       u8 *dw_align_buf;
+       dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
index fbea5e3fb9479bc4ff4ef250026df2583969ec67..8ce10caf3e1981fa7b5132d44a3aa760c5252d0f 100644 (file)
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
        len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                                          DWC2_HC_XFER_COMPLETE, NULL);
-       if (!len) {
+       if (!len && !qtd->isoc_split_offset) {
                qtd->complete_split = 0;
-               qtd->isoc_split_offset = 0;
                return 0;
        }
 
        frame_desc->actual_length += len;
 
+       if (chan->align_buf) {
+               dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+               dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+                                DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+               memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+                      chan->qh->dw_align_buf, len);
+       }
+
        qtd->isoc_split_offset += len;
 
        hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1224,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
         * avoid interrupt storms we'll wait before retrying if we've got
         * several NAKs. If we didn't do this we'd retry directly from the
         * interrupt handler and could end up quickly getting another
-        * interrupt (another NAK), which we'd retry.
+        * interrupt (another NAK), which we'd retry. Note that we do not
+        * delay retries for IN parts of control requests, as those are expected
+        * to complete fairly quickly, and if we delay them we risk confusing
+        * the device and causing it to issue a STALL.
         *
         * Note that in DMA mode software only gets involved to re-send NAKed
         * transfers for split transactions, so we only need to apply this
@@ -1237,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
                        qtd->error_count = 0;
                qtd->complete_split = 0;
                qtd->num_naks++;
-               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+                               !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+                                 chan->ep_is_in);
                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
                goto handle_nak_done;
        }
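
The NAK backoff decision now has two inputs; extracted as a predicate it reads as follows (a restatement of the hunk above, not new logic):

    /* Sketch: delay the retry of a NAKed split?  Never for control-IN. */
    static bool nak_wants_wait(const struct dwc2_qtd *qtd,
                               const struct dwc2_host_chan *chan)
    {
            return qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
                   !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
                     chan->ep_is_in);
    }
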
index d7c3d6c776d86a8edf5c41832a7856cd6fca29eb..301ced1618f873203b534ffa77cf7ef59a773f84 100644 (file)
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
        /* Get the map and adjust if this is a multi_tt hub */
        map = qh->dwc_tt->periodic_bitmaps;
        if (qh->dwc_tt->usb_tt->multi)
-               map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+               map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
        return map;
 }
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
        if (qh->desc_list)
                dwc2_hcd_qh_free_ddma(hsotg, qh);
+       else if (hsotg->unaligned_cache && qh->dw_align_buf)
+               kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
        kfree(qh);
 }
 
index ea91310113b9abd2a233a17bcd973d7a1ed1e09e..103807587dc640a747d75339f703a3aea0e8e992 100644 (file)
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
        if (!dwc->clks)
                return -ENOMEM;
 
-       dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
        dwc->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
        if (IS_ERR(dwc->reset))
                return PTR_ERR(dwc->reset);
 
-       ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-       if (ret == -EPROBE_DEFER)
-               return ret;
-       /*
-        * Clocks are optional, but new DT platforms should support all clocks
-        * as required by the DT-binding.
-        */
-       if (ret)
-               dwc->num_clks = 0;
+       if (dev->of_node) {
+               dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+               ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+               /*
+                * Clocks are optional, but new DT platforms should support all
+                * clocks as required by the DT-binding.
+                */
+               if (ret)
+                       dwc->num_clks = 0;
+       }
 
        ret = reset_control_deassert(dwc->reset);
        if (ret)
index 6b3ccd542bd76f6c40308a9df550ac2674673e45..dbeff5e6ad1461eea71a4cf9e562755cd50b1b17 100644 (file)
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
 
        reset_control_put(simple->resets);
 
-       pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+       pm_runtime_set_suspended(dev);
 
        return 0;
 }
index c961a94d136b5248a5e242a6ab3370b22f3fa360..f57e7c94b8e5e0154ef430e3a0b3973d6ff84bd9 100644 (file)
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP              0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index b0e67ab2f98cd09ba54eaabfdab1680e30783e63..a6d0203e40b6e048bfb736133321ef6c857c09ca 100644 (file)
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
        qcom->dwc3 = of_find_device_by_node(dwc3_np);
        if (!qcom->dwc3) {
                dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+               ret = -ENODEV;
                goto depopulate;
        }
 
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
        return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
 
        return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
index c77ff50a88a2c5c44910b02fa88189a23491fab2..8efde178eef4d55faeb91aa924bcc82e7b548a41 100644 (file)
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                ret = dwc3_ep0_start_trans(dep);
        } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
                   req->request.length && req->request.zero) {
-               u32     maxpacket;
 
                ret = usb_gadget_map_request_by_dev(dwc->sysdev,
                                &req->request, dep->number);
                if (ret)
                        return;
 
-               maxpacket = dep->endpoint.maxpacket;
-
                /* prepare normal TRB */
                dwc3_ep0_prepare_one_trb(dep, req->request.dma,
                                         req->request.length,
index f242c2bcea810c0dee04f067ceea0dc716912058..b8a15840b4ffd574430cdc5d0e57e74a3c116242 100644 (file)
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                 */
                if (w_value && !f->get_alt)
                        break;
+
+               spin_lock(&cdev->lock);
                value = f->set_alt(f, w_index, w_value);
                if (value == USB_GADGET_DELAYED_STATUS) {
                        DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                        DBG(cdev, "delayed_status count %d\n",
                                        cdev->delayed_status);
                }
+               spin_unlock(&cdev->lock);
                break;
        case USB_REQ_GET_INTERFACE:
                if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -1816,7 +1819,6 @@ unknown:
                if (cdev->use_os_string && cdev->os_desc_config &&
                    (ctrl->bRequestType & USB_TYPE_VENDOR) &&
                    ctrl->bRequest == cdev->b_vendor_code) {
-                       struct usb_request              *req;
                        struct usb_configuration        *os_desc_cfg;
                        u8                              *buf;
                        int                             interface;
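
Taking cdev->lock around f->set_alt() closes a window in which a fast completion could call usb_composite_setup_continue(), which takes the same lock and decrements delayed_status, before composite_setup() had accounted for the delayed reply. The discipline, sketched as a fragment under the assumption that both sides touch delayed_status only under cdev->lock:

    /* Sketch (fragment): count a delayed status reply under the lock. */
    spin_lock(&cdev->lock);
    value = f->set_alt(f, w_index, w_value);
    if (value == USB_GADGET_DELAYED_STATUS)
            cdev->delayed_status++;     /* paired with setup_continue() */
    spin_unlock(&cdev->lock);
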
index dce9d12c7981afb1733be479e9daa43977bd218a..3ada83d81bda8d2810ab04d154b65658f57e0ba2 100644 (file)
@@ -215,6 +215,7 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
+       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                  cancellation_work);
+
+       ENTER();
+
+       usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+       struct ffs_data *ffs = io_data->ffs;
        int value;
 
        ENTER();
 
-       spin_lock_irq(&epfile->ffs->eps_lock);
-
-       if (likely(io_data && io_data->ep && io_data->req))
-               value = usb_ep_dequeue(io_data->ep, io_data->req);
-       else
+       if (likely(io_data && io_data->ep && io_data->req)) {
+               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+               value = -EINPROGRESS;
+       } else {
                value = -EINVAL;
-
-       spin_unlock_irq(&epfile->ffs->eps_lock);
+       }
 
        return value;
 }
@@ -3253,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
        __ffs_event_add(ffs, FUNCTIONFS_SETUP);
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-       return USB_GADGET_DELAYED_STATUS;
+       return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
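
ffs_aio_cancel() used to call usb_ep_dequeue() directly while holding eps_lock with interrupts off, which can deadlock or sleep depending on the UDC; the fix pushes the dequeue onto ffs->io_completion_wq and reports -EINPROGRESS to the AIO core. The deferral pattern in isolation (mirrors the code above; struct ffs_io_data is file-local to f_fs.c):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Sketch: defer a may-sleep cancel out of a non-sleeping context. */
    static void cancel_worker(struct work_struct *work)
    {
            struct ffs_io_data *io_data =
                    container_of(work, struct ffs_io_data, cancellation_work);

            usb_ep_dequeue(io_data->ep, io_data->req);  /* may sleep here */
    }

    static int defer_cancel(struct ffs_io_data *io_data)
    {
            INIT_WORK(&io_data->cancellation_work, cancel_worker);
            queue_work(io_data->ffs->io_completion_wq,
                       &io_data->cancellation_work);
            return -EINPROGRESS;        /* completion will arrive async */
    }
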
index d2dc1f00180b7869201afc1dbd82dcd4b126ab16..d582921f7257de4325f76102c7d86e6eda463645 100644 (file)
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
 };
 
 struct cntrl_cur_lay3 {
-       __u32   dCUR;
+       __le32  dCUR;
 };
 
 struct cntrl_range_lay3 {
-       __u16   wNumSubRanges;
-       __u32   dMIN;
-       __u32   dMAX;
-       __u32   dRES;
+       __le16  wNumSubRanges;
+       __le32  dMIN;
+       __le32  dMAX;
+       __le32  dRES;
 } __packed;
 
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
        if (!agdev->out_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
        if (!agdev->in_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
                memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
-                       c.dCUR = p_srate;
+                       c.dCUR = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       c.dCUR = c_srate;
+                       c.dCUR = cpu_to_le32(c_srate);
 
                value = min_t(unsigned, w_length, sizeof c);
                memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                if (entity_id == USB_IN_CLK_ID)
-                       r.dMIN = p_srate;
+                       r.dMIN = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       r.dMIN = c_srate;
+                       r.dMIN = cpu_to_le32(c_srate);
                else
                        return -EOPNOTSUPP;
 
                r.dMAX = r.dMIN;
                r.dRES = 0;
-               r.wNumSubRanges = 1;
+               r.wNumSubRanges = cpu_to_le16(1);
 
                value = min_t(unsigned, w_length, sizeof r);
                memcpy(req->buf, &r, value);
index a72295c953bba502468fb380c38601eaef6e4988..fb5ed97572e5fabe11609a3c01367fd26ba280c6 100644 (file)
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
        struct snd_uac_chip *uac; /* parent chip */
        bool ep_enabled; /* if the ep is enabled */
-       /* Size of the ring buffer */
-       size_t dma_bytes;
-       unsigned char *dma_area;
 
        struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
        void *rbuf;
 
-       size_t period_size;
-
        unsigned max_psize;     /* MaxPacketSize of endpoint */
        struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
        unsigned pending;
-       unsigned long flags;
+       unsigned long flags, flags2;
        unsigned int hw_ptr;
-       bool update_alsa = false;
        int status = req->status;
        struct uac_req *ur = req->context;
        struct snd_pcm_substream *substream;
+       struct snd_pcm_runtime *runtime;
        struct uac_rtd_params *prm = ur->pp;
        struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
        if (!substream)
                goto exit;
 
+       snd_pcm_stream_lock_irqsave(substream, flags2);
+
+       runtime = substream->runtime;
+       if (!runtime || !snd_pcm_running(substream)) {
+               snd_pcm_stream_unlock_irqrestore(substream, flags2);
+               goto exit;
+       }
+
        spin_lock_irqsave(&prm->lock, flags);
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
                req->actual = req->length;
        }
 
-       pending = prm->hw_ptr % prm->period_size;
-       pending += req->actual;
-       if (pending >= prm->period_size)
-               update_alsa = true;
-
        hw_ptr = prm->hw_ptr;
-       prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
        spin_unlock_irqrestore(&prm->lock, flags);
 
        /* Pack USB load in ALSA ring buffer */
-       pending = prm->dma_bytes - hw_ptr;
+       pending = runtime->dma_bytes - hw_ptr;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (unlikely(pending < req->actual)) {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-                       memcpy(req->buf + pending, prm->dma_area,
+                       memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+                       memcpy(req->buf + pending, runtime->dma_area,
                               req->actual - pending);
                } else {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+                       memcpy(req->buf, runtime->dma_area + hw_ptr,
+                              req->actual);
                }
        } else {
                if (unlikely(pending < req->actual)) {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-                       memcpy(prm->dma_area, req->buf + pending,
+                       memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+                       memcpy(runtime->dma_area, req->buf + pending,
                               req->actual - pending);
                } else {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+                       memcpy(runtime->dma_area + hw_ptr, req->buf,
+                              req->actual);
                }
        }
 
+       spin_lock_irqsave(&prm->lock, flags);
+       /* update hw_ptr after data is copied to memory */
+       prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+       hw_ptr = prm->hw_ptr;
+       spin_unlock_irqrestore(&prm->lock, flags);
+       snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+       if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+               snd_pcm_period_elapsed(substream);
+
 exit:
        if (usb_ep_queue(ep, req, GFP_ATOMIC))
                dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-       if (update_alsa)
-               snd_pcm_period_elapsed(substream);
 }
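
The playback and capture branches above are the standard ring-buffer wrap-around copy; a generic sketch of that pattern (illustrative helper, names assumed, not part of the patch):

static void ring_copy_sketch(void *dma_area, size_t dma_bytes, size_t hw_ptr,
                             void *pkt, size_t len, bool to_ring)
{
        size_t tail = dma_bytes - hw_ptr;       /* bytes until the wrap point */

        if (len > tail) {                       /* copy wraps past the end */
                if (to_ring) {
                        memcpy(dma_area + hw_ptr, pkt, tail);
                        memcpy(dma_area, pkt + tail, len - tail);
                } else {
                        memcpy(pkt, dma_area + hw_ptr, tail);
                        memcpy(pkt + tail, dma_area, len - tail);
                }
        } else if (to_ring) {
                memcpy(dma_area + hw_ptr, pkt, len);
        } else {
                memcpy(pkt, dma_area + hw_ptr, len);
        }
}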
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
                               struct snd_pcm_hw_params *hw_params)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-       int err;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       err = snd_pcm_lib_malloc_pages(substream,
+       return snd_pcm_lib_malloc_pages(substream,
                                        params_buffer_bytes(hw_params));
-       if (err >= 0) {
-               prm->dma_bytes = substream->runtime->dma_bytes;
-               prm->dma_area = substream->runtime->dma_area;
-               prm->period_size = params_period_bytes(hw_params);
-       }
-
-       return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       prm->dma_area = NULL;
-       prm->dma_bytes = 0;
-       prm->period_size = 0;
-
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
        if (err < 0)
                goto snd_fail;
 
-       strcpy(pcm->name, pcm_name);
+       strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
        pcm->private_data = uac;
        uac->pcm = pcm;
 
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-       strcpy(card->driver, card_name);
-       strcpy(card->shortname, card_name);
+       strlcpy(card->driver, card_name, sizeof(card->driver));
+       strlcpy(card->shortname, card_name, sizeof(card->shortname));
        sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
index f0cdf89b850371e693db8fefc505efd90b72cd0d..83ba8a2eb6af9f95fe84a95e44904a20ada3c5ac 100644 (file)
@@ -2,6 +2,7 @@
 config USB_ASPEED_VHUB
        tristate "Aspeed vHub UDC driver"
        depends on ARCH_ASPEED || COMPILE_TEST
+       depends on USB_LIBCOMPOSITE
        help
          USB peripheral controller for the Aspeed AST2500 family
          SoCs supporting the "vHub" functionality and USB2.0
index 20ffb03ff6ac1823c366dcedc5ba0a2360ffeb34..e2927fb083cf14f3119fc71945a286176fa7990d 100644 (file)
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
        /* Check our state, cancel pending requests if needed */
        if (ep->ep0.state != ep0_state_token) {
                EPDBG(ep, "wrong state\n");
+               ast_vhub_nuke(ep, -EIO);
+
+               /*
+                * Accept the packet regardless; this seems to happen
+                * when stalling a SETUP packet that has an OUT data
+                * phase.
+                */
                ast_vhub_nuke(ep, 0);
                goto stall;
        }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
        if (chunk && req->req.buf)
                memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+       vhub_dma_workaround(ep->buf);
+
        /* Remember chunk size and trigger send */
        reg = VHUB_EP0_SET_TX_LEN(chunk);
        writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
        EPVDBG(ep, "rx prime\n");
 
        /* Prime endpoint for receiving data */
-       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
index 80c9feac5147b5450cf0be37ce6ed29c8b44f0bb..5939eb1e97f209bc43538155ed666aaebcd28eaf 100644 (file)
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
        if (!req->req.dma) {
 
                /* For IN transfers, copy data over first */
-               if (ep->epn.is_in)
+               if (ep->epn.is_in) {
                        memcpy(ep->buf, req->req.buf + act, chunk);
+                       vhub_dma_workaround(ep->buf);
+               }
                writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-       } else
+       } else {
+               if (ep->epn.is_in)
+                       vhub_dma_workaround(req->req.buf);
                writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+       }
 
        /* Start DMA */
        req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                                   struct ast_vhub_req *req)
 {
+       struct ast_vhub_desc *desc = NULL;
        unsigned int act = req->act_count;
        unsigned int len = req->req.length;
        unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
        /* While we can create descriptors */
        while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-               struct ast_vhub_desc *desc;
                unsigned int d_num;
 
                /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                req->act_count = act = act + chunk;
        }
 
+       if (likely(desc))
+               vhub_dma_workaround(desc);
+
        /* Tell HW about new descriptors */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
index 2b040257bc1f698f0097a4d673ed6cb9fffe3d6b..4ed03d33a5a92b53f836d9d6930722aa5c418cfe 100644 (file)
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...)      do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+       /*
+        * This works around a confirmed HW issue with the Aspeed chip.
+        *
+        * The core uses a different bus to memory than the AHB going to
+        * the USB device controller. Due to the latter having a higher
+        * priority than the core for arbitration on that bus, it's
+        * possible for an MMIO write to the device, followed by a DMA
+        * read by the device from memory, to both be performed and
+        * serviced before a previous store to memory has completed.
+        *
+        * Thus the following scenario can happen:
+        *
+        *    - Driver writes to a DMA descriptor (Mbus)
+        *    - Driver writes to the MMIO register to start the DMA (AHB)
+        *    - The gadget sees the second write and sends a read of the
+        *      descriptor to the memory controller (Mbus)
+        *    - The gadget's read hits memory before the descriptor
+        *      write, causing it to read an obsolete value.
+        *
+        * Thankfully the problem is limited to the USB gadget device; other
+        * masters in the SoC all have a lower priority than the core, thus
+        * ensuring that the store by the core arrives first.
+        *
+        * The workaround consists of using a dummy read of the memory before
+        * doing the MMIO writes. This will ensure that the previous writes
+        * have been "pushed out".
+        */
+       mb();
+       (void)__raw_readl((void __iomem *)addr);
+}
+
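A minimal sketch of the resulting call-site ordering, assuming the register names from the hunks above (the wrapper below is illustrative only): the descriptor memory is read back before the MMIO doorbell write, so the earlier store is visible to the device.

static void example_epn_kick(struct ast_vhub_ep *ep,
                             struct ast_vhub_desc *desc)
{
        /* 1. CPU stores to the descriptor (Mbus) happen first ... */
        /* 2. ... the dummy read forces them out to memory ... */
        vhub_dma_workaround(desc);
        /* 3. ... then the MMIO write may start the DMA (AHB) */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}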
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
                   int status);
index a3ecce62662ba6cdc1e0f7ee65e90b2445537b56..11e25a3f4f1fa86ea42677d7efbbdd1f0f83e6b5 100644 (file)
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
                r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-               msleep(3);
+               mdelay(3);
 
                r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-               msleep(1);
+               mdelay(1);
 
                r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
        r8a66597->ep0_req->length = 2;
        /* AV: what happens if we get called again before that gets through? */
        spin_unlock(&r8a66597->lock);
-       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
        spin_lock(&r8a66597->lock);
 }
 
index 1fbfd89d0a0f00945abec540a75c58582a3d1943..387f124a83340b5f27eb9cfd2e4bb0323fcbd954 100644 (file)
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
        return 0;
 }
 
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 {
        struct xhci_dbc         *dbc = xhci->dbc;
 
        if (dbc->state == DS_DISABLED)
-               return;
+               return -1;
 
        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
+
+       return 0;
 }
 
 static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+       int ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
                xhci_dbc_tty_unregister_device(xhci);
 
        spin_lock_irqsave(&dbc->lock, flags);
-       xhci_do_dbc_stop(xhci);
+       ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       if (!ret)
+               pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
 
 static void
index acbd3d7b8828693f79a51f19ad70fb1aa4ade050..ef350c33dc4a8615a0188af0cea13ae29a876532 100644 (file)
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
        if (!ep->stream_info)
                return NULL;
 
-       if (stream_id > ep->stream_info->num_streams)
+       if (stream_id >= ep->stream_info->num_streams)
                return NULL;
        return ep->stream_info->stream_rings[stream_id];
 }
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 
        dev = xhci->devs[slot_id];
 
-       trace_xhci_free_virt_device(dev);
-
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;
 
+       trace_xhci_free_virt_device(dev);
+
        if (dev->tt_info)
                old_active_eps = dev->tt_info->active_eps;
 
index a8c1d073cba05e3b070e73722d02b32eaf112e69..4b463e5202a421705be74610a136239dc3a9c423 100644 (file)
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
        unsigned long mask;
        unsigned int port;
        bool idle, enable;
-       int err;
+       int err = 0;
 
        memset(&rsp, 0, sizeof(rsp));
 
@@ -1223,10 +1223,10 @@ disable_rpm:
        pm_runtime_disable(&pdev->dev);
        usb_put_hcd(tegra->hcd);
 disable_xusbc:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
 disable_xusba:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
 put_padctl:
        tegra_xusb_padctl_put(tegra->padctl);
index 410544ffe78f68fa73e7328e8e105028a9116008..88b427434bd82536c653a911bb393c4bdb87814b 100644 (file)
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
        TP_ARGS(ring, trb)
 );
 
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev),
+       TP_STRUCT__entry(
+               __field(void *, vdev)
+               __field(unsigned long long, out_ctx)
+               __field(unsigned long long, in_ctx)
+               __field(u8, fake_port)
+               __field(u8, real_port)
+               __field(u16, current_mel)
+
+       ),
+       TP_fast_assign(
+               __entry->vdev = vdev;
+               __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+               __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+               __entry->fake_port = (u8) vdev->fake_port;
+               __entry->real_port = (u8) vdev->real_port;
+               __entry->current_mel = (u16) vdev->current_mel;
+               ),
+       TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+               __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+               __entry->fake_port, __entry->real_port, __entry->current_mel
+       )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_virt_dev,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
        TP_ARGS(vdev)
 );
 
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
-       TP_PROTO(struct xhci_virt_device *vdev),
-       TP_ARGS(vdev)
-);
-
 DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev)
index 8c8da2d657fa1008c1e612e6f30d3e9716d534b6..68e6132aa8b2a3985f01ac12c6bdd75882c4748d 100644 (file)
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
        spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+       struct xhci_port        **ports;
+       int                     port_index;
+       u32                     status;
+       u32                     portsc;
+
+       status = readl(&xhci->op_regs->status);
+       if (status & STS_EINT)
+               return true;
+       /*
+        * Checking STS_EINT is not enough as there is a lag between a change
+        * bit being set and the Port Status Change Event that it generated
+        * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+        */
+
+       port_index = xhci->usb2_rhub.num_ports;
+       ports = xhci->usb2_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       port_index = xhci->usb3_rhub.num_ports;
+       ports = xhci->usb3_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       return false;
+}
+
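The two loops above scan the USB2 and USB3 root hubs identically; a per-rhub helper would express the same check once. A sketch, under the assumption that both rhubs are struct xhci_hub as declared in xhci.h:

static bool xhci_rhub_has_portevent(struct xhci_hub *rhub)
{
        unsigned int i;
        u32 portsc;

        for (i = 0; i < rhub->num_ports; i++) {
                portsc = readl(rhub->ports[i]->addr);
                if ((portsc & PORT_CHANGE_MASK) ||
                    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
                        return true;
        }
        return false;
}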
 /*
  * Stop HC (not bus-specific)
  *
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0, status;
+       u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
+               /*
+                * Some controllers take up to 55+ ms to complete the
+                * controller restore, so set the timeout to 100 ms; the
+                * xHCI specification doesn't mention any timeout value.
+                */
                if (xhci_handshake(&xhci->op_regs->status,
-                             STS_RESTORE, 0, 10 * 1000)) {
+                             STS_RESTORE, 0, 100 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
  done:
        if (retval == 0) {
                /* Resume root hubs only when have pending events. */
-               status = readl(&xhci->op_regs->status);
-               if (status & STS_EINT) {
+               if (xhci_pending_portevent(xhci)) {
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
@@ -3012,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        if (!list_empty(&ep->ring->td_list)) {
                dev_err(&udev->dev, "EP not empty, refuse reset\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
        xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
index 939e2f86b595eecbf1f1ecac7dcd7f39965d238d..841e89ffe2e9d88f6f81255340da58144916ca59 100644 (file)
@@ -382,6 +382,10 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
+#define PORT_CHANGE_MASK       (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+                                PORT_RC | PORT_PLC | PORT_CEC)
+
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
 * Sx state. Warm port reset should be performed to clear this bit and move port
  * to connected state.
index 8abb6cbbd98a17d6b6ff95d0ad88268832780b87..3be40eaa1ac9b2caf493a8fd21e8980982b5d9a1 100644 (file)
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
                          loff_t *ppos)
 {
        struct usb_yurex *dev;
-       int retval = 0;
-       int bytes_read = 0;
+       int len = 0;
        char in_buffer[20];
        unsigned long flags;
 
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 
        mutex_lock(&dev->io_mutex);
        if (!dev->interface) {          /* already disconnected */
-               retval = -ENODEV;
-               goto exit;
+               mutex_unlock(&dev->io_mutex);
+               return -ENODEV;
        }
 
        spin_lock_irqsave(&dev->lock, flags);
-       bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+       len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
        spin_unlock_irqrestore(&dev->lock, flags);
-
-       if (*ppos < bytes_read) {
-               if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-                       retval = -EFAULT;
-               else {
-                       retval = bytes_read - *ppos;
-                       *ppos += bytes_read;
-               }
-       }
-
-exit:
        mutex_unlock(&dev->io_mutex);
-       return retval;
+
+       return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
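
simple_read_from_buffer() takes over the *ppos bookkeeping and the copy_to_user() handling that the removed code open-coded. Roughly, the helper behaves like this sketch (simplified rendering of the fs/libfs.c semantics, for illustration):

static ssize_t sketch_read_from_buffer(void __user *to, size_t count,
                                       loff_t *ppos, const void *from,
                                       size_t available)
{
        loff_t pos = *ppos;
        size_t left;

        if (pos < 0)
                return -EINVAL;
        if (pos >= available || !count)
                return 0;
        if (count > available - pos)
                count = available - pos;
        left = copy_to_user(to, from + pos, count);     /* bytes not copied */
        if (left == count)
                return -EFAULT;
        count -= left;
        *ppos = pos + count;
        return count;
}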
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
index 900875f326d7c2939f053f2227b4dec9942aa9ef..f7c96d209eda78a73b812685a931c59b53a1a6c8 100644 (file)
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
        if (pdata->init && pdata->init(pdev) != 0)
                return -EINVAL;
 
+#ifdef CONFIG_PPC32
        if (pdata->big_endian_mmio) {
                _fsl_readl = _fsl_readl_be;
                _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
                _fsl_readl = _fsl_readl_le;
                _fsl_writel = _fsl_writel_le;
        }
+#endif
 
        /* request irq */
        p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct otg_fsm *fsm = &fsl_otg_dev->fsm;
index bdd7a5ad3bf1c0060bcef8a3a1dc640b4a45cbcc..3bb1fff02bedd0e076581baabeff440e4552e3ce 100644 (file)
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
        r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                            value, index, buf, bufsize, DEFAULT_TIMEOUT);
-       if (r < bufsize) {
+       if (r < (int)bufsize) {
                if (r >= 0) {
                        dev_err(&dev->dev,
                                "short control message received (%d < %u)\n",
index eb6c26cbe5792b0e535c77b9e2e245b700071458..626a29d9aa58d7e13770f048ae8c705dcfab2fea 100644 (file)
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+       { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+       { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+       { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
        { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+       { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
        { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+       { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+       { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+       { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+       { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
        { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
index 5169624d8b11386ecb07d50234850ca99397150f..38d43c4b7ce547700e007f6f406d17f2c65d3ca4 100644 (file)
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
                             3, /* get pins */
                             USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
                             0, 0, data, 1, 2000);
-       if (rc >= 0)
+       if (rc == 1)
                *value = *data;
+       else if (rc >= 0)
+               rc = -EIO;
 
        kfree(data);
        return rc;
index fdceb46d9fc61a0c5eea2f113abd494dc4cc693b..b580b4c7fa488bbf80f1b5628e4e46bb8e28807b 100644 (file)
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
        }
 
        dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+       if (urb->actual_length < 1)
+               goto out;
+
        dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
                mos7840_port->MsrLsr, mos7840_port->port_num);
        data = urb->transfer_buffer;
index 8a201dd53d36b352b3d7fb68cf6486c08ddb96ff..d1d20252bad86889bb87b7f755545020abd6b0ea 100644 (file)
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
        u64 ts_nsec = local_clock();
        unsigned long rem_nsec;
 
+       mutex_lock(&port->logbuffer_lock);
        if (!port->logbuffer[port->logbuffer_head]) {
                port->logbuffer[port->logbuffer_head] =
                                kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
-               if (!port->logbuffer[port->logbuffer_head])
+               if (!port->logbuffer[port->logbuffer_head]) {
+                       mutex_unlock(&port->logbuffer_lock);
                        return;
+               }
        }
 
        vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
 
-       mutex_lock(&port->logbuffer_lock);
-
        if (tcpm_log_full(port)) {
                port->logbuffer_head = max(port->logbuffer_head - 1, 0);
                strcpy(tmpbuffer, "overflow");
@@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
        tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
 
+       port->supply_voltage = mv;
+       port->current_limit = max_ma;
+
        if (port->tcpc->set_current_limit)
                ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
 
@@ -2136,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
                         * PPS APDO. Again skip the first sink PDO as this will
                         * always be 5V 3A.
                         */
-                       for (j = i; j < port->nr_snk_pdo; j++) {
+                       for (j = 1; j < port->nr_snk_pdo; j++) {
                                pdo = port->snk_pdo[j];
 
                                switch (pdo_type(pdo)) {
@@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
        tcpm_set_attached_state(port, false);
        port->try_src_count = 0;
        port->try_snk_count = 0;
-       port->supply_voltage = 0;
-       port->current_limit = 0;
        port->usb_type = POWER_SUPPLY_USB_TYPE_C;
 
        power_supply_changed(port->psy);
@@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port)
                    tcpm_port_is_sink(port) &&
                    time_is_after_jiffies(port->delayed_runtime)) {
                        tcpm_set_state(port, SNK_DISCOVERY,
-                                      port->delayed_runtime - jiffies);
+                                      jiffies_to_msecs(port->delayed_runtime -
+                                                       jiffies));
                        break;
                }
                tcpm_set_state(port, unattached_state(port), 0);
index bd5cca5632b395def6384ec233d8ba5926e81c93..8d0a6fe748bdc50ca99800c3d6ba5680a4e9f0bd 100644 (file)
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
        }
 
        if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+               typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+               switch (con->status.partner_type) {
+               case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+                       typec_set_data_role(con->port, TYPEC_HOST);
+                       break;
+               case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+                       typec_set_data_role(con->port, TYPEC_DEVICE);
+                       break;
+               default:
+                       break;
+               }
+
                if (con->status.connected)
                        ucsi_register_partner(con);
                else
index 44eb4e1ea817b2e38eab36cee60021368508a342..a18112a83faed2df09e49c0a5a93d2fce0823c5f 100644 (file)
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       /* This will make sure we can use ioremap_nocache() */
+       status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+
        /*
         * NOTE: The memory region for the data structures is also used in an
         * operation region, which means ACPI has already reserved it. Therefore
index 24ee2605b9f043c9c1128d73bd44a4aa47322a37..42dc1d3d71cf05a7c91c5316ee832b62b15bf75e 100644 (file)
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
        def_bool y if !S390
 
 config VFIO_PCI_IGD
-       depends on VFIO_PCI
-       def_bool y if X86
+       bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+       depends on VFIO_PCI && X86
+       default y
+       help
+         Support for Intel IGD specific extensions to enable direct
+         assignment to virtual machines.  This includes exposing an IGD
+         specific firmware table and read-only copies of the host bridge
+         and LPC bridge config space.
+
+         To enable Intel IGD assignment through vfio-pci, say Y.
index b423a309a6e0d08930599cb1c0bebf5c5e08ab9a..125b58eff9369618e9a40398e089d8a0bee8deae 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
+#include <linux/nospec.h>
 
 #include "vfio_pci_private.h"
 
@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
                        if (info.index >=
                            VFIO_PCI_NUM_REGIONS + vdev->num_regions)
                                return -EINVAL;
+                       info.index = array_index_nospec(info.index,
+                                                       VFIO_PCI_NUM_REGIONS +
+                                                       vdev->num_regions);
 
                        i = info.index - VFIO_PCI_NUM_REGIONS;
 
index 759a5bdd40e1b37305f4fd850b60a87f1d3aafae..7cd63b0c1a4623edd236458cbf047cd472fa4b02 100644 (file)
@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-               unsigned long tce, unsigned long size,
+               unsigned long tce, unsigned long shift,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;
 
-       mem = mm_iommu_lookup(container->mm, tce, size);
+       mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
        if (!mem)
                return -EINVAL;
 
-       ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+       ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
        if (ret)
                return -EINVAL;
 
@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
        if (!pua)
                return;
 
-       ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+       ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
                        &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
                                entry + i);
 
                ret = tce_iommu_prereg_ua_to_hpa(container,
-                               tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+                               tce, tbl->it_page_shift, &hpa, &mem);
                if (ret)
                        break;
 
index 2c75b33db4ac19768ea77415685b4fac700dc4d3..3e5b17710a4f1fa47eb4f1333c17c96e1eae2cdd 100644 (file)
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
        struct page *page[1];
        struct vm_area_struct *vma;
        struct vm_area_struct *vmas[1];
+       unsigned int flags = 0;
        int ret;
 
+       if (prot & IOMMU_WRITE)
+               flags |= FOLL_WRITE;
+
+       down_read(&mm->mmap_sem);
        if (mm == current->mm) {
-               ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-                                             page, vmas);
+               ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
        } else {
-               unsigned int flags = 0;
-
-               if (prot & IOMMU_WRITE)
-                       flags |= FOLL_WRITE;
-
-               down_read(&mm->mmap_sem);
                ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
                                            vmas, NULL);
                /*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                        ret = -EOPNOTSUPP;
                        put_page(page[0]);
                }
-               up_read(&mm->mmap_sem);
        }
+       up_read(&mm->mmap_sem);
 
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
index 686dc670fd294b3077cf363241338ab871b26244..29756d88799b630f2c73ca097b56b092a14a7d5a 100644 (file)
@@ -1226,7 +1226,8 @@ err_used:
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-       sockfd_put(sock);
+       if (sock)
+               sockfd_put(sock);
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
index a502f1af4a213607adec4aa28fa6ae8eb9ce0389..ed3114556fdaf96eb130832c54a80cc4c41973b7 100644 (file)
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
        d->iotlb = niotlb;
 
        for (i = 0; i < d->nvqs; ++i) {
-               mutex_lock(&d->vqs[i]->mutex);
-               d->vqs[i]->iotlb = niotlb;
-               mutex_unlock(&d->vqs[i]->mutex);
+               struct vhost_virtqueue *vq = d->vqs[i];
+
+               mutex_lock(&vq->mutex);
+               vq->iotlb = niotlb;
+               __vhost_vq_meta_reset(vq);
+               mutex_unlock(&vq->mutex);
        }
 
        vhost_umem_clean(oiotlb);
index 46a4484e3da79196b32c06cfbd3d0f092d03147c..c6f78d27947b9dae5f00b148c325198956910095 100644 (file)
@@ -20,7 +20,7 @@
 #include <drm/drm_connector.h>  /* For DRM_MODE_PANEL_ORIENTATION_* */
 
 static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void efifb_destroy(struct fb_info *info)
 {
-       if (info->screen_base)
-               iounmap(info->screen_base);
+       if (info->screen_base) {
+               if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+                       iounmap(info->screen_base);
+               else
+                       memunmap(info->screen_base);
+       }
        if (request_mem_succeeded)
                release_mem_region(info->apertures->ranges[0].base,
                                   info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
-                               nowc = true;
+                               mem_flags &= ~EFI_MEMORY_WC;
                }
        }
 
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_remap;
        unsigned int size_total;
        char *option = NULL;
+       efi_memory_desc_t md;
 
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       if (nowc)
-               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
-       else
-               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+               if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+                   (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+                       pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+                              efifb_fix.smem_start);
+                       err = -EIO;
+                       goto err_release_fb;
+               }
+               /*
+                * If the UEFI memory map covers the efifb region, we may only
+                * remap it using the attributes the memory map prescribes.
+                */
+               mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+               mem_flags &= md.attribute;
+       }
+       if (mem_flags & EFI_MEMORY_WC)
+               info->screen_base = ioremap_wc(efifb_fix.smem_start,
+                                              efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_UC)
+               info->screen_base = ioremap(efifb_fix.smem_start,
+                                           efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_WT)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WT);
+       else if (mem_flags & EFI_MEMORY_WB)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WB);
        if (!info->screen_base) {
-               pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+               pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
                err = -EIO;
                goto err_release_fb;
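With the EFI memory-map attributes folded into mem_flags, the mapping choice above reduces to a precedence ladder (WC, then UC, then WT, then WB). A condensed sketch of that selection, as an illustrative helper (not part of the patch):

static void *efifb_map_fb(u64 flags, phys_addr_t start, size_t len)
{
        if (flags & EFI_MEMORY_WC)              /* write-combine */
                return (void __force *)ioremap_wc(start, len);
        if (flags & EFI_MEMORY_UC)              /* uncached */
                return (void __force *)ioremap(start, len);
        if (flags & EFI_MEMORY_WT)              /* write-through */
                return memremap(start, len, MEMREMAP_WT);
        if (flags & EFI_MEMORY_WB)              /* write-back */
                return memremap(start, len, MEMREMAP_WB);
        return NULL;                            /* no permitted mapping */
}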
@@ -371,7 +399,10 @@ err_fb_dealoc:
 err_groups:
        sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
 err_unmap:
-       iounmap(info->screen_base);
+       if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+               iounmap(info->screen_base);
+       else
+               memunmap(info->screen_base);
 err_release_fb:
        framebuffer_release(info);
 err_release_mem:
index 6b237e3f4983046cdc8327b75c173952fbc23f86..3988c0914322134cc1de978d3aedca96475b64ba 100644 (file)
@@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
        tell_host(vb, vb->inflate_vq);
 
        /* balloon's page migration 2nd step -- deflate "page" */
+       spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_delete(page);
+       spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, page);
        tell_host(vb, vb->deflate_vq);
index 451e833f593175886fd4fa6ae066860dad4cf95e..48b154276179f0269c7444e38d6717ac493a06d5 100644 (file)
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND)    += pvcalls-front.o
 xen-evtchn-y                           := evtchn.o
 xen-gntdev-y                           := gntdev.o
 xen-gntalloc-y                         := gntalloc.o
-xen-privcmd-y                          := privcmd.o
+xen-privcmd-y                          := privcmd.o privcmd-buf.o
index 762378f1811cc9069dc6171edb55aaa3610b82fa..08e4af04d6f2c32850a049a83721933a82883b8c 100644 (file)
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
                xen_irq_info_cleanup(info);
        }
 
-       BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
        xen_free_irq(irq);
 }
 
index 2473b0a9e6e41d5d51b47e318d7e3b26d81ec5f6..ba9f3eec2bd00f6f39eb952ed5815e7b45c9735e 100644 (file)
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 
        return 0;
 }
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
 /**
  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
        }
        free_xenballooned_pages(nr_pages, pages);
 }
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
index 8835065029d34a150a91662bb4562b4a41be50ca..c93d8ef8df3483bbc393b2101c189120f844b634 100644 (file)
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                return;
        }
 
-       if (sysrq_key != '\0')
-               xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+       if (sysrq_key != '\0') {
+               err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+               if (err) {
+                       pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+                              __func__, err);
+                       xenbus_transaction_end(xbt, 1);
+                       return;
+               }
+       }
 
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
                        continue;
                snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
                         shutdown_handlers[idx].command);
-               xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               if (err) {
+                       pr_err("%s: Error %d writing %s\n", __func__,
+                               err, node);
+                       return err;
+               }
        }
 
        return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644 (file)
index 0000000..df1ed37
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+                       "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+       struct mutex lock;
+       struct list_head list;
+       unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+       struct privcmd_buf_private *file_priv;
+       struct list_head list;
+       unsigned int users;
+       unsigned int n_pages;
+       struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv;
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       mutex_init(&file_priv->lock);
+       INIT_LIST_HEAD(&file_priv->list);
+
+       file->private_data = file_priv;
+
+       return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+       unsigned int i;
+
+       vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+       list_del(&vma_priv->list);
+
+       for (i = 0; i < vma_priv->n_pages; i++)
+               if (vma_priv->pages[i])
+                       __free_page(vma_priv->pages[i]);
+
+       kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       while (!list_empty(&file_priv->list)) {
+               vma_priv = list_first_entry(&file_priv->list,
+                                           struct privcmd_buf_vma_private,
+                                           list);
+               privcmd_buf_vmapriv_free(vma_priv);
+       }
+
+       mutex_unlock(&file_priv->lock);
+
+       kfree(file_priv);
+
+       return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+       if (!vma_priv)
+               return;
+
+       mutex_lock(&vma_priv->file_priv->lock);
+       vma_priv->users++;
+       mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+       struct privcmd_buf_private *file_priv;
+
+       if (!vma_priv)
+               return;
+
+       file_priv = vma_priv->file_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       vma_priv->users--;
+       if (!vma_priv->users)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+       pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+                vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+                vmf->pgoff, (void *)vmf->address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+       .open = privcmd_buf_vma_open,
+       .close = privcmd_buf_vma_close,
+       .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+       unsigned long count = vma_pages(vma);
+       unsigned int i;
+       int ret = 0;
+
+       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+           file_priv->allocated + count > limit)
+               return -EINVAL;
+
+       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+                          GFP_KERNEL);
+       if (!vma_priv)
+               return -ENOMEM;
+
+       vma_priv->n_pages = count;
+       count = 0;
+       for (i = 0; i < vma_priv->n_pages; i++) {
+               vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vma_priv->pages[i])
+                       break;
+               count++;
+       }
+
+       mutex_lock(&file_priv->lock);
+
+       file_priv->allocated += count;
+
+       vma_priv->file_priv = file_priv;
+       vma_priv->users = 1;
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       vma->vm_ops = &privcmd_buf_vm_ops;
+       vma->vm_private_data = vma_priv;
+
+       list_add(&vma_priv->list, &file_priv->list);
+
+       if (vma_priv->n_pages != count)
+               ret = -ENOMEM;
+       else
+               for (i = 0; i < vma_priv->n_pages; i++) {
+                       ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+                                            vma_priv->pages[i]);
+                       if (ret)
+                               break;
+               }
+
+       if (ret)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+
+       return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+       .owner = THIS_MODULE,
+       .open = privcmd_buf_open,
+       .release = privcmd_buf_release,
+       .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/hypercall",
+       .fops = &xen_privcmdbuf_fops,
+};
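From userspace the new device would be used roughly as follows. A hedged sketch: the /dev path is inferred from the miscdevice name above, and the mapping must be MAP_SHARED because privcmd_buf_mmap() rejects private mappings.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* len should be a multiple of the page size */
int alloc_hypercall_buf(void **buf, size_t len)
{
        int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);

        if (fd < 0)
                return -1;
        /* MAP_SHARED is mandatory; the driver returns -EINVAL otherwise */
        *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);      /* pages stay alive until the mapping is unmapped */
        return *buf == MAP_FAILED ? -1 : 0;
}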
index 8ae0349d9f0ae47036ed2b6b8e968230c1fdfb41..7e6e682104dc4e9a77d8149e2f4500ded81b41b8 100644 (file)
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
+
+       err = misc_register(&xen_privcmdbuf_dev);
+       if (err != 0) {
+               pr_err("Could not register Xen hypercall-buf device\n");
+               misc_deregister(&privcmd_dev);
+               return err;
+       }
+
        return 0;
 }
 
 static void __exit privcmd_exit(void)
 {
        misc_deregister(&privcmd_dev);
+       misc_deregister(&xen_privcmdbuf_dev);
 }
 
 module_init(privcmd_init);
index 14facaeed36fda1a1492aaa2a88a72bc8855c450..0dd9f8f67ee30efc849a7bdf2085036c0c0e84ab 100644 (file)
@@ -1,3 +1,6 @@
 #include <linux/fs.h>
 
 extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
index 7bc88fd43cfc84d05873893ef4ddec8307e76c2a..e2f3e8b0fba9ff160a7c82a37e64cf5fe0b3c8f0 100644 (file)
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 {
        struct v2p_entry *entry;
        unsigned long flags;
+       int err;
 
        if (try) {
                spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                        scsiback_del_translation_entry(info, vir);
                }
        } else if (!try) {
-               xenbus_printf(XBT_NIL, info->dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
        }
 }
 
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
        val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
        if (IS_ERR(val)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
        strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
                           &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
        if (XENBUS_EXIST_ERR(err)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
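
The report-on-failure pattern added here repeats three times in this file; a hypothetical helper (not part of the patch) factoring it out might look like this — note __func__ would then name the helper rather than the original call site:

static void scsiback_set_state_closed(struct vscsibk_info *info,
                                      const char *state)
{
        int err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
                                "%d", XenbusStateClosed);

        if (err)
                xenbus_dev_error(info->dev, err, "%s: writing %s",
                                 __func__, state);
}
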
 
index 42e102e2e74a4084c4f8ddb2bbdab825d8ed0d73..85ff859d3af5f36c5ad3ae1d8823e7c5d4f7c366 100644 (file)
@@ -859,8 +859,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
 static int
 v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
-                    struct file *file, unsigned flags, umode_t mode,
-                    int *opened)
+                    struct file *file, unsigned flags, umode_t mode)
 {
        int err;
        u32 perm;
@@ -917,7 +916,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
                v9inode->writeback_fid = (void *) inode_fid;
        }
        mutex_unlock(&v9inode->v_mutex);
-       err = finish_open(file, dentry, generic_file_open, opened);
+       err = finish_open(file, dentry, generic_file_open);
        if (err)
                goto error;
 
@@ -925,7 +924,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
                v9fs_cache_inode_set_cookie(d_inode(dentry), file);
 
-       *opened |= FILE_CREATED;
+       file->f_mode |= FMODE_CREATED;
 out:
        dput(res);
        return err;
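
This is one instance of a tree-wide ->atomic_open() conversion in this series: the int *opened out-parameter is gone, finish_open() loses its last argument, and creation is reported by setting FMODE_CREATED in file->f_mode. A skeleton of the new contract, with the filesystem-specific work reduced to a hypothetical do_fs_create():

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
                               struct file *file, unsigned flags,
                               umode_t mode)
{
        int err;

        if (flags & O_CREAT) {
                err = do_fs_create(dir, dentry, mode);  /* hypothetical */
                if (err)
                        return err;
                /* replaces *opened |= FILE_CREATED */
                file->f_mode |= FMODE_CREATED;
        }
        return finish_open(file, dentry, generic_file_open);
}
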
index 7f6ae21a27b3ca451d353398b4a0e25cb55a9177..4823e1c4699945bf5c8170928dbaa6228c06995d 100644 (file)
@@ -241,8 +241,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
 
 static int
 v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
-                         struct file *file, unsigned flags, umode_t omode,
-                         int *opened)
+                         struct file *file, unsigned flags, umode_t omode)
 {
        int err = 0;
        kgid_t gid;
@@ -352,13 +351,13 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        }
        mutex_unlock(&v9inode->v_mutex);
        /* Since we are opening a file, assign the open fid to the file */
-       err = finish_open(file, dentry, generic_file_open, opened);
+       err = finish_open(file, dentry, generic_file_open);
        if (err)
                goto err_clunk_old_fid;
        file->private_data = ofid;
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
                v9fs_cache_inode_set_cookie(inode, file);
-       *opened |= FILE_CREATED;
+       file->f_mode |= FMODE_CREATED;
 out:
        v9fs_put_acl(dacl, pacl);
        dput(res);
index c836c425ca94587e381fc9c9a867594717d88cd6..e91028d4340abf9b7ad32c5cf77f668bf32bfd4b 100644 (file)
@@ -287,7 +287,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
                ADFS_I(inode)->mmu_private = inode->i_size;
        }
 
-       insert_inode_hash(inode);
+       inode_fake_hash(inode);
 
 out:
        return inode;
index 71fa525d63a06c5f3b4196ddad6dcea517c4f4da..7e099a7a4eb1e50bc9914e73432ecc751d0b21ed 100644 (file)
@@ -291,6 +291,7 @@ static void destroy_inodecache(void)
 static const struct super_operations adfs_sops = {
        .alloc_inode    = adfs_alloc_inode,
        .destroy_inode  = adfs_destroy_inode,
+       .drop_inode     = generic_delete_inode,
        .write_inode    = adfs_write_inode,
        .put_super      = adfs_put_super,
        .statfs         = adfs_statfs,
index a1b18082991b2088711a2bca42f173fa951e49e8..183cc5418722602b61b87cfc6ca73c5d6dcbb5d8 100644 (file)
@@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
        trace_afs_notify_call(rxcall, call);
        call->need_attention = true;
 
-       u = __atomic_add_unless(&call->usage, 1, 0);
+       u = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (u != 0) {
                trace_afs_call(call, afs_call_trace_wake, u,
                               atomic_read(&call->net->nr_outstanding_calls),
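
__atomic_add_unless() was renamed to atomic_fetch_add_unless() this cycle; both return the counter's previous value, so the conditional-get idiom is unchanged. Sketched in isolation:

/* atomic_fetch_add_unless(v, a, u) adds a to *v unless *v == u and
 * returns the old value either way, so a zero return means the last
 * reference was already gone and no new one was taken. */
static bool example_get_ref(atomic_t *usage)
{
        return atomic_fetch_add_unless(usage, 1, 0) != 0;
}
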
index e1d20124ec0e8698a1e8a5940537ff45f2e57d2c..16f1f25bfd897842a08189f00a0e73197ce4a80d 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
  *     Implements an efficient asynchronous io interface.
  *
  *     Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
- *     Copyright 2018 Christoph Hellwig.
  *
  *     See ../COPYING for licensing terms.
  */
@@ -165,22 +164,10 @@ struct fsync_iocb {
        bool                    datasync;
 };
 
-struct poll_iocb {
-       struct file             *file;
-       __poll_t                events;
-       struct wait_queue_head  *head;
-
-       union {
-               struct wait_queue_entry wait;
-               struct work_struct      work;
-       };
-};
-
 struct aio_kiocb {
        union {
                struct kiocb            rw;
                struct fsync_iocb       fsync;
-               struct poll_iocb        poll;
        };
 
        struct kioctx           *ki_ctx;
@@ -215,9 +202,7 @@ static const struct address_space_operations aio_ctx_aops;
 
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
-       struct qstr this = QSTR_INIT("[aio]", 5);
        struct file *file;
-       struct path path;
        struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
@@ -226,31 +211,17 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
        inode->i_mapping->private_data = ctx;
        inode->i_size = PAGE_SIZE * nr_pages;
 
-       path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
-       if (!path.dentry) {
+       file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
+                               O_RDWR, &aio_ring_fops);
+       if (IS_ERR(file))
                iput(inode);
-               return ERR_PTR(-ENOMEM);
-       }
-       path.mnt = mntget(aio_mnt);
-
-       d_instantiate(path.dentry, inode);
-       file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
-       if (IS_ERR(file)) {
-               path_put(&path);
-               return file;
-       }
-
-       file->f_flags = O_RDWR;
        return file;
 }
 
 static struct dentry *aio_mount(struct file_system_type *fs_type,
                                int flags, const char *dev_name, void *data)
 {
-       static const struct dentry_operations ops = {
-               .d_dname        = simple_dname,
-       };
-       struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+       struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
                                           AIO_RING_MAGIC);
 
        if (!IS_ERR(root))
@@ -1590,6 +1561,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
                        iocb->aio_rw_flags))
                return -EINVAL;
+
        req->file = fget(iocb->aio_fildes);
        if (unlikely(!req->file))
                return -EBADF;
@@ -1604,137 +1576,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        return 0;
 }
 
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
-       if (list_empty(&req->wait.entry))
-               return false;
-       list_del_init(&req->wait.entry);
-       return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       fput(iocb->poll.file);
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
-       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
-       if (!list_empty_careful(&iocb->ki_list))
-               aio_remove_iocb(iocb);
-       __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
-       struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
-       struct poll_iocb *req = &aiocb->poll;
-       struct wait_queue_head *head = req->head;
-       bool found = false;
-
-       spin_lock(&head->lock);
-       found = __aio_poll_remove(req);
-       spin_unlock(&head->lock);
-
-       if (found) {
-               req->events = 0;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-       return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-               void *key)
-{
-       struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
-       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-       struct file *file = req->file;
-       __poll_t mask = key_to_poll(key);
-
-       assert_spin_locked(&req->head->lock);
-
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & req->events))
-               return 0;
-
-       mask = file->f_op->poll_mask(file, req->events) & req->events;
-       if (!mask)
-               return 0;
-
-       __aio_poll_remove(req);
-
-       /*
-        * Try completing without a context switch if we can acquire ctx_lock
-        * without spinning.  Otherwise we need to defer to a workqueue to
-        * avoid a deadlock due to the lock order.
-        */
-       if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
-               list_del_init(&iocb->ki_list);
-               spin_unlock(&iocb->ki_ctx->ctx_lock);
-
-               __aio_poll_complete(iocb, mask);
-       } else {
-               req->events = mask;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-
-       return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
-       struct kioctx *ctx = aiocb->ki_ctx;
-       struct poll_iocb *req = &aiocb->poll;
-       __poll_t mask;
-
-       /* reject any unknown events outside the normal event mask. */
-       if ((u16)iocb->aio_buf != iocb->aio_buf)
-               return -EINVAL;
-       /* reject fields that are not defined for poll */
-       if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
-               return -EINVAL;
-
-       req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-       req->file = fget(iocb->aio_fildes);
-       if (unlikely(!req->file))
-               return -EBADF;
-       if (!file_has_poll_mask(req->file))
-               goto out_fail;
-
-       req->head = req->file->f_op->get_poll_head(req->file, req->events);
-       if (!req->head)
-               goto out_fail;
-       if (IS_ERR(req->head)) {
-               mask = EPOLLERR;
-               goto done;
-       }
-
-       init_waitqueue_func_entry(&req->wait, aio_poll_wake);
-       aiocb->ki_cancel = aio_poll_cancel;
-
-       spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
-       if (!mask) {
-               __add_wait_queue(req->head, &req->wait);
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-       }
-       spin_unlock(&req->head->lock);
-       spin_unlock_irq(&ctx->ctx_lock);
-done:
-       if (mask)
-               __aio_poll_complete(aiocb, mask);
-       return 0;
-out_fail:
-       fput(req->file);
-       return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
@@ -1808,9 +1649,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        case IOCB_CMD_FDSYNC:
                ret = aio_fsync(&req->fsync, &iocb, true);
                break;
-       case IOCB_CMD_POLL:
-               ret = aio_poll(req, &iocb);
-               break;
        default:
                pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
                ret = -EINVAL;
@@ -2042,6 +1880,11 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        return ret;
 }
 
+struct __aio_sigset {
+       const sigset_t __user   *sigmask;
+       size_t          sigsetsize;
+};
+
 SYSCALL_DEFINE6(io_pgetevents,
                aio_context_t, ctx_id,
                long, min_nr,
index 3168ee4e77f4fe386d94e09a41cd2c1c98d76e39..91262c34b797c3932680640b3643fe360a643b97 100644 (file)
@@ -71,8 +71,6 @@ struct file *anon_inode_getfile(const char *name,
                                const struct file_operations *fops,
                                void *priv, int flags)
 {
-       struct qstr this;
-       struct path path;
        struct file *file;
 
        if (IS_ERR(anon_inode_inode))
@@ -81,40 +79,24 @@ struct file *anon_inode_getfile(const char *name,
        if (fops->owner && !try_module_get(fops->owner))
                return ERR_PTR(-ENOENT);
 
-       /*
-        * Link the inode to a directory entry by creating a unique name
-        * using the inode sequence number.
-        */
-       file = ERR_PTR(-ENOMEM);
-       this.name = name;
-       this.len = strlen(name);
-       this.hash = 0;
-       path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
-       if (!path.dentry)
-               goto err_module;
-
-       path.mnt = mntget(anon_inode_mnt);
        /*
         * We know the anon_inode inode count is always greater than zero,
         * so ihold() is safe.
         */
        ihold(anon_inode_inode);
-
-       d_instantiate(path.dentry, anon_inode_inode);
-
-       file = alloc_file(&path, OPEN_FMODE(flags), fops);
+       file = alloc_file_pseudo(anon_inode_inode, anon_inode_mnt, name,
+                                flags & (O_ACCMODE | O_NONBLOCK), fops);
        if (IS_ERR(file))
-               goto err_dput;
+               goto err;
+
        file->f_mapping = anon_inode_inode->i_mapping;
 
-       file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = priv;
 
        return file;
 
-err_dput:
-       path_put(&path);
-err_module:
+err:
+       iput(anon_inode_inode);
        module_put(fops->owner);
        return file;
 }
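
As in the aio conversion above, the open-coded d_alloc_pseudo() + d_instantiate() + alloc_file() sequence is replaced by the new alloc_file_pseudo() helper. Its calling convention, as both hunks use it (example_fops is assumed):

/* Sketch: alloc_file_pseudo() takes over the inode reference on
 * success; on failure the caller still owns it and must iput(). */
static struct file *example_pseudo_file(struct vfsmount *mnt)
{
        struct inode *inode = alloc_anon_inode(mnt->mnt_sb);
        struct file *file;

        if (IS_ERR(inode))
                return ERR_CAST(inode);

        file = alloc_file_pseudo(inode, mnt, "[example]", O_RDWR,
                                 &example_fops);
        if (IS_ERR(file))
                iput(inode);
        return file;
}
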
index 43fedde15c26203548c08c866aa7fbf5cfc27dd0..1f85d35ec8b7b7f6a3866962bce468e3ae7d56f6 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for the linux autofs-filesystem routines.
 #
 
-obj-$(CONFIG_AUTOFS_FS) += autofs.o
+obj-$(CONFIG_AUTOFS_FS) += autofs4.o
 
-autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
+autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
index ea4ca1445ab78808644408de99430d8bbd6e1fd9..86eafda4a65226ef292f8713c2a86dee48e831ff 100644 (file)
@@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
                                cmd);
                        goto out;
                }
+       } else {
+               unsigned int inr = _IOC_NR(cmd);
+
+               if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
+                       err = -EINVAL;
+                       goto out;
+               }
        }
 
        err = 0;
@@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
        dev_t devid;
        int err, fd;
 
-       /* param->path has already been checked */
+       /* param->path has been checked in validate_dev_ioctl() */
+
        if (!param->openmount.devid)
                return -EINVAL;
 
@@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
        dev_t devid;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        devid = sbi->sb->s_dev;
 
@@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
        unsigned int devid, magic;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        name = param->path;
        type = param->ismountpoint.in.type;
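
The per-handler size checks dropped above are subsumed by the new branch in validate_dev_ioctl(): commands whose payload must carry a path are rejected up front when none is present. Roughly:

/* Sketch of the centralized rule (assumes param->size has already been
 * bounds-checked): no appended path means these commands are invalid. */
if (param->size == AUTOFS_DEV_IOCTL_SIZE) {
        unsigned int inr = _IOC_NR(cmd);

        if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
            inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
            inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD)
                return -EINVAL;
}
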
index cc9447e1903f7a16d023067c0098c4123e764351..79ae07d9592f55cc06a10086cf45453250637d30 100644 (file)
@@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = {
        .kill_sb        = autofs_kill_sb,
 };
 MODULE_ALIAS_FS("autofs");
-MODULE_ALIAS("autofs4");
+MODULE_ALIAS("autofs");
 
 static int __init init_autofs_fs(void)
 {
index 125e8bbd22a250e3ea1c7710f29f942f7eaef150..8035d2a445617905b595222e2b5bb2d4f52b80d4 100644 (file)
@@ -134,7 +134,7 @@ static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
 
 static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
                                 struct file *file, unsigned int open_flag,
-                                umode_t create_mode, int *opened)
+                                umode_t create_mode)
 {
        return -EIO;
 }
index 0ac456b52bddb62e9c817d61bccc886c7c8cde85..efae2fb0930aaa96c10595a462616d8caf418a9e 100644 (file)
@@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file)
                goto out_free_ph;
        }
 
-       len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
-                           ELF_MIN_ALIGN - 1);
-       bss = eppnt->p_memsz + eppnt->p_vaddr;
+       len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+       bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
        if (bss > len) {
                error = vm_brk(len, bss - len);
                if (error)
@@ -1752,7 +1751,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
                const struct user_regset *regset = &view->regsets[i];
                do_thread_regset_writeback(t->task, regset);
                if (regset->core_note_type && regset->get &&
-                   (!regset->active || regset->active(t->task, regset))) {
+                   (!regset->active || regset->active(t->task, regset) > 0)) {
                        int ret;
                        size_t size = regset_size(t->task, regset);
                        void *data = kmalloc(size, GFP_KERNEL);
index 4b5fff31ef279eb739a37ccba610eaf7a6a98787..aa4a7a23ff99d8a9a111a37eee80d5e75cf0886b 100644 (file)
@@ -205,7 +205,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
                goto error;
 
        if (fmt->flags & MISC_FMT_OPEN_FILE) {
-               interp_file = filp_clone_open(fmt->interp_file);
+               interp_file = file_clone_open(fmt->interp_file);
                if (!IS_ERR(interp_file))
                        deny_write_access(interp_file);
        } else {
index 0dd87aaeb39a7d05bbec28ce01536b106c4f76c2..aba25414231a83af85df892bed8795bb3af1c2e5 100644 (file)
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
-               return ret;
+               goto out;
        ret = bio.bi_iter.bi_size;
 
        if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                put_page(bvec->bv_page);
        }
 
-       if (vecs != inline_vecs)
-               kfree(vecs);
-
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);
 
+out:
+       if (vecs != inline_vecs)
+               kfree(vecs);
+
        bio_uninit(&bio);
 
        return ret;
index cce6087d6880fa4c1673dbc8aab0026fc62391f4..b3e45714d28f0507f40e590ff14076fcdb7728a5 100644 (file)
@@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
        struct extent_map *em;
        u64 start = page_offset(page);
        u64 end = start + PAGE_SIZE - 1;
-       struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
-       struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+       struct extent_io_tree *tree = &btrfs_inode->io_tree;
+       struct extent_map_tree *map = &btrfs_inode->extent_tree;
 
        if (gfpflags_allow_blocking(mask) &&
            page->mapping->host->i_size > SZ_16M) {
@@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
                                            extent_map_end(em) - 1,
                                            EXTENT_LOCKED | EXTENT_WRITEBACK,
                                            0, NULL)) {
+                               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                       &btrfs_inode->runtime_flags);
                                remove_extent_mapping(map, em);
                                /* once for the rb tree */
                                free_extent_map(em);
@@ -4542,8 +4545,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        offset_in_extent = em_start - em->start;
                em_end = extent_map_end(em);
                em_len = em_end - em_start;
-               disko = em->block_start + offset_in_extent;
                flags = 0;
+               if (em->block_start < EXTENT_MAP_LAST_BYTE)
+                       disko = em->block_start + offset_in_extent;
+               else
+                       disko = 0;
 
                /*
                 * bump off for our next call to get_extent
index e9482f0db9d08ffd79a117f0d6f08b6eb94cae99..def3ada0f0b8e575723908a3f1e311c217499c10 100644 (file)
@@ -6335,8 +6335,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        location->type = BTRFS_INODE_ITEM_KEY;
 
        ret = btrfs_insert_inode_locked(inode);
-       if (ret < 0)
+       if (ret < 0) {
+               iput(inode);
                goto fail;
+       }
 
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
@@ -6395,12 +6397,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        return inode;
 
 fail_unlock:
-       unlock_new_inode(inode);
+       discard_new_inode(inode);
 fail:
        if (dir && name)
                BTRFS_I(dir)->index_cnt--;
        btrfs_free_path(path);
-       iput(inode);
        return ERR_PTR(ret);
 }
 
@@ -6505,7 +6506,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        int err;
-       int drop_inode = 0;
        u64 objectid;
        u64 index = 0;
 
@@ -6527,6 +6527,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                        mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
+               inode = NULL;
                goto out_unlock;
        }
 
@@ -6541,31 +6542,24 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_unlock_inode;
+               goto out_unlock;
 
        err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
                        0, index);
-       if (err) {
-               goto out_unlock_inode;
-       } else {
-               btrfs_update_inode(trans, root, inode);
-               d_instantiate_new(dentry, inode);
-       }
+       if (err)
+               goto out_unlock;
+
+       btrfs_update_inode(trans, root, inode);
+       d_instantiate_new(dentry, inode);
 
 out_unlock:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
-       if (drop_inode) {
+       if (err && inode) {
                inode_dec_link_count(inode);
-               iput(inode);
+               discard_new_inode(inode);
        }
        return err;
-
-out_unlock_inode:
-       drop_inode = 1;
-       unlock_new_inode(inode);
-       goto out_unlock;
-
 }
 
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -6575,7 +6569,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
-       int drop_inode_on_err = 0;
        int err;
        u64 objectid;
        u64 index = 0;
@@ -6598,9 +6591,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                        mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
+               inode = NULL;
                goto out_unlock;
        }
-       drop_inode_on_err = 1;
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -6613,33 +6606,28 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_unlock_inode;
+               goto out_unlock;
 
        err = btrfs_update_inode(trans, root, inode);
        if (err)
-               goto out_unlock_inode;
+               goto out_unlock;
 
        err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
                        0, index);
        if (err)
-               goto out_unlock_inode;
+               goto out_unlock;
 
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        d_instantiate_new(dentry, inode);
 
 out_unlock:
        btrfs_end_transaction(trans);
-       if (err && drop_inode_on_err) {
+       if (err && inode) {
                inode_dec_link_count(inode);
-               iput(inode);
+               discard_new_inode(inode);
        }
        btrfs_btree_balance_dirty(fs_info);
        return err;
-
-out_unlock_inode:
-       unlock_new_inode(inode);
-       goto out_unlock;
-
 }
 
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6748,6 +6736,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                        S_IFDIR | mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
+               inode = NULL;
                goto out_fail;
        }
 
@@ -6758,34 +6747,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_fail_inode;
+               goto out_fail;
 
        btrfs_i_size_write(BTRFS_I(inode), 0);
        err = btrfs_update_inode(trans, root, inode);
        if (err)
-               goto out_fail_inode;
+               goto out_fail;
 
        err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
                        dentry->d_name.name,
                        dentry->d_name.len, 0, index);
        if (err)
-               goto out_fail_inode;
+               goto out_fail;
 
        d_instantiate_new(dentry, inode);
        drop_on_err = 0;
 
 out_fail:
        btrfs_end_transaction(trans);
-       if (drop_on_err) {
+       if (err && inode) {
                inode_dec_link_count(inode);
-               iput(inode);
+               discard_new_inode(inode);
        }
        btrfs_btree_balance_dirty(fs_info);
        return err;
-
-out_fail_inode:
-       unlock_new_inode(inode);
-       goto out_fail;
 }
 
 static noinline int uncompress_inline(struct btrfs_path *path,
@@ -9005,13 +8990,14 @@ again:
 
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
-out_unlock:
        if (!ret2) {
                btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
        }
+
+out_unlock:
        unlock_page(page);
 out:
        btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9429,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
+       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
 
@@ -9639,7 +9626,8 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret = btrfs_end_transaction(trans);
+       ret2 = btrfs_end_transaction(trans);
+       ret = ret ? ret : ret2;
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
@@ -10112,7 +10100,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        struct btrfs_key key;
        struct inode *inode = NULL;
        int err;
-       int drop_inode = 0;
        u64 objectid;
        u64 index = 0;
        int name_len;
@@ -10145,6 +10132,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                                objectid, S_IFLNK|S_IRWXUGO, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
+               inode = NULL;
                goto out_unlock;
        }
 
@@ -10161,12 +10149,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_unlock_inode;
+               goto out_unlock;
 
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
-               goto out_unlock_inode;
+               goto out_unlock;
        }
        key.objectid = btrfs_ino(BTRFS_I(inode));
        key.offset = 0;
@@ -10176,7 +10164,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                                      datasize);
        if (err) {
                btrfs_free_path(path);
-               goto out_unlock_inode;
+               goto out_unlock;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -10208,26 +10196,19 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (!err)
                err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
                                BTRFS_I(inode), 0, index);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock_inode;
-       }
+       if (err)
+               goto out_unlock;
 
        d_instantiate_new(dentry, inode);
 
 out_unlock:
        btrfs_end_transaction(trans);
-       if (drop_inode) {
+       if (err && inode) {
                inode_dec_link_count(inode);
-               iput(inode);
+               discard_new_inode(inode);
        }
        btrfs_btree_balance_dirty(fs_info);
        return err;
-
-out_unlock_inode:
-       drop_inode = 1;
-       unlock_new_inode(inode);
-       goto out_unlock;
 }
 
 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -10436,14 +10417,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        ret = btrfs_init_inode_security(trans, inode, dir, NULL);
        if (ret)
-               goto out_inode;
+               goto out;
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
-               goto out_inode;
+               goto out;
        ret = btrfs_orphan_add(trans, BTRFS_I(inode));
        if (ret)
-               goto out_inode;
+               goto out;
 
        /*
         * We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -10453,21 +10434,15 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
-       unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
+       unlock_new_inode(inode);
        mark_inode_dirty(inode);
-
 out:
        btrfs_end_transaction(trans);
-       if (ret)
-               iput(inode);
+       if (ret && inode)
+               discard_new_inode(inode);
        btrfs_btree_balance_dirty(fs_info);
        return ret;
-
-out_inode:
-       unlock_new_inode(inode);
-       goto out;
-
 }
 
 __attribute__((const))
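
Every creation error path in this file now converges on the same shape: a new inode that failed setup while still locked is passed to discard_new_inode(), which replaces the old unlock_new_inode() + iput() pair and removes the extra out_unlock_inode labels. The pattern, reduced to a sketch with hypothetical helpers:

static int example_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode)
{
        struct inode *inode;
        int err;

        inode = example_new_inode(dir, mode);   /* hypothetical: locked */
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        err = example_setup(inode);             /* hypothetical */
        if (!err) {
                d_instantiate_new(dentry, inode);       /* unlocks */
                return 0;
        }

        inode_dec_link_count(inode);
        discard_new_inode(inode);       /* unlock + final iput */
        return err;
}
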
index c2837a32d689de9a7d5d3bfc96d7d861cd221dfb..b077544b523245c05c6ec53710d4f9d45d1eb641 100644 (file)
@@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->src_pages[i] = NULL;
                }
                pg = cmp->dst_pages[i];
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->dst_pages[i] = NULL;
                }
        }
 }
@@ -3577,7 +3579,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
                                              dst, dst_loff, &cmp);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
 
                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3589,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, tail_len, dst,
                                              dst_loff, &cmp);
 
+out_free:
+       kvfree(cmp.src_pages);
+       kvfree(cmp.dst_pages);
+
 out_unlock:
        if (same_inode)
                inode_unlock(src);
        else
                btrfs_double_inode_unlock(src, dst);
 
-out_free:
-       kvfree(cmp.src_pages);
-       kvfree(cmp.dst_pages);
-
        return ret;
 }
 
index 1874a6d2e6f5422c809759d0ca29e9bb973826bb..c25dc47210a397560e929f55fc3feea8f26798dd 100644 (file)
@@ -2680,8 +2680,10 @@ out:
                free_extent_buffer(scratch_leaf);
        }
 
-       if (done && !ret)
+       if (done && !ret) {
                ret = 1;
+               fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+       }
        return ret;
 }
 
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 
        if (!init_flags) {
                /* we're resuming qgroup rescan at mount time */
-               if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+               if (!(fs_info->qgroup_flags &
+                     BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup is not enabled");
-               else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+                       ret = -EINVAL;
+               } else if (!(fs_info->qgroup_flags &
+                            BTRFS_QGROUP_STATUS_FLAG_ON)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup rescan is not queued");
-               return -EINVAL;
+                       ret = -EINVAL;
+               }
+
+               if (ret)
+                       return ret;
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index 5723060364776d1fd3e3e1e09bdfc09d4bb7eadb..6702896cdb8f7bcdb93a393f5ee3482498376445 100644 (file)
@@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                return ret;
        }
 
-       if (sctx->is_dev_replace && !is_metadata && !have_csum) {
-               sblocks_for_recheck = NULL;
-               goto nodatasum_case;
-       }
-
        /*
         * read all mirrors one after the other. This includes to
         * re-read the extent or metadata block that failed (that was
@@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                goto out;
        }
 
-       if (!is_metadata && !have_csum) {
+       /*
+        * NOTE: Even for nodatasum case, it's still possible that it's a
+        * compressed data extent, thus scrub_fixup_nodatasum(), which write
+        * inode page cache onto disk, could cause serious data corruption.
+        *
+        * So here we could only read from disk, and hope our recovery could
+        * reach disk before the newer write.
+        */
+       if (0 && !is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;
 
                WARN_ON(sctx->is_dev_replace);
 
-nodatasum_case:
-
                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COWed, that it might be modified
index e034ad9e23b48b42826de6bed1a8f59d6e926a20..1da162928d1a9b305ab36c2d99386afb2f060326 100644 (file)
@@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 {
        int ret;
 
+       mutex_lock(&uuid_mutex);
        mutex_lock(&fs_devices->device_list_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
@@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                ret = open_fs_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&uuid_mutex);
 
        return ret;
 }
index d9f001078e08f677591495935c29c3aabad87d56..4a717d40080754378921f06404767659e983abf9 100644 (file)
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
                           "%s",
                           fsdef->dentry->d_sb->s_id);
 
-       fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+       fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+                           &cache->cache);
 
        ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
        if (ret < 0)
index ab0bbe93b398ce68dd0dc04652a626635d2c7c23..af2b17b21b94ba0c97b1085dc7154a3ee4df5c62 100644 (file)
@@ -186,12 +186,12 @@ try_again:
         * need to wait for it to be destroyed */
 wait_for_old_object:
        trace_cachefiles_wait_active(object, dentry, xobject);
+       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 
        if (fscache_object_is_live(&xobject->fscache)) {
                pr_err("\n");
                pr_err("Error: Unexpected object collision\n");
                cachefiles_printk_object(object, xobject);
-               BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
        goto try_again;
 
 requeue:
-       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
index 5082c8a496866dcab1740c63088f7d8f21fb1c5a..40f7595aad10f20666df7741b8ce8dce3db37b6e 100644 (file)
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
+       struct fscache_retrieval *op = monitor->op;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;
 
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        list_del(&wait->entry);
 
        /* move onto the action list and queue for FS-Cache thread pool */
-       ASSERT(monitor->op);
+       ASSERT(op);
 
-       object = container_of(monitor->op->op.object,
-                             struct cachefiles_object, fscache);
+       /* We need to temporarily bump the usage count as we don't own a ref
+        * here otherwise cachefiles_read_copier() may free the op between the
+        * monitor being enqueued on the op->to_do list and the op getting
+        * enqueued on the work queue.
+        */
+       fscache_get_retrieval(op);
 
+       object = container_of(op->op.object, struct cachefiles_object, fscache);
        spin_lock(&object->work_lock);
-       list_add_tail(&monitor->op_link, &monitor->op->to_do);
+       list_add_tail(&monitor->op_link, &op->to_do);
        spin_unlock(&object->work_lock);
 
-       fscache_enqueue_retrieval(monitor->op);
+       fscache_enqueue_retrieval(op);
+       fscache_put_retrieval(op);
        return 0;
 }
 
index ad0bed99b1d5ab0c61922ae561cab24d2e9d4933..e2679e8a25358d0fc96ee7aca8d304ff204bea99 100644 (file)
@@ -429,8 +429,7 @@ out:
  * file or symlink, return 1 so the VFS can retry.
  */
 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
-                    struct file *file, unsigned flags, umode_t mode,
-                    int *opened)
+                    struct file *file, unsigned flags, umode_t mode)
 {
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -507,9 +506,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &acls);
-                       *opened |= FILE_CREATED;
+                       file->f_mode |= FMODE_CREATED;
                }
-               err = finish_open(file, dentry, ceph_open, opened);
+               err = finish_open(file, dentry, ceph_open);
        }
 out_req:
        if (!req->r_err && req->r_target_inode)
index ee764ac352ab7b855165b797c1daf579fbaa45e1..a866be999216a81bcfa90dfcb17cc11177442731 100644 (file)
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
+               dput(dn);
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
index a7077a0c989fb33cde837a41e6b4d8b9951d2c43..971328b99edecc3ccb58421b6e458ca7fbb01278 100644 (file)
@@ -1025,8 +1025,7 @@ extern const struct file_operations ceph_file_fops;
 extern int ceph_renew_caps(struct inode *inode);
 extern int ceph_open(struct inode *inode, struct file *file);
 extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
-                           struct file *file, unsigned flags, umode_t mode,
-                           int *opened);
+                           struct file *file, unsigned flags, umode_t mode);
 extern int ceph_release(struct inode *inode, struct file *filp);
 extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                                  char *data, size_t len);
index 116146022aa1fa82d334790f7e2d7ff46b052bf3..bfe99950581527bcc494acb6419436e6373aa923 100644 (file)
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        seq_putc(m, '\n');
 }
 
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+       struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+       struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+       seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+       seq_puts(m, "\t\tCapabilities: ");
+       if (iface->rdma_capable)
+               seq_puts(m, "rdma ");
+       if (iface->rss_capable)
+               seq_puts(m, "rss ");
+       seq_putc(m, '\n');
+       if (iface->sockaddr.ss_family == AF_INET)
+               seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+       else if (iface->sockaddr.ss_family == AF_INET6)
+               seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
                                              mid_entry->mid);
                        }
                        spin_unlock(&GlobalMid_Lock);
+
+                       spin_lock(&ses->iface_lock);
+                       if (ses->iface_count)
+                               seq_printf(m, "\n\tServer interfaces: %zu\n",
+                                          ses->iface_count);
+                       for (j = 0; j < ses->iface_count; j++) {
+                               seq_printf(m, "\t%d)\n", j);
+                               cifs_dump_iface(m, &ses->iface_list[j]);
+                       }
+                       spin_unlock(&ses->iface_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
index 937251cc61c046916228f150916c8c0a82a442a5..ee2a8ec70056f7451695cb75bfe1e00a95280ff0 100644 (file)
@@ -37,7 +37,6 @@
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
-                       int start,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash)
 {
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
+       int is_smb2 = server->vals->header_preamble_size == 0;
 
-       for (i = start; i < n_vec; i++) {
+       /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+       if (is_smb2) {
+               if (iov[0].iov_len <= 4)
+                       return -EIO;
+               i = 0;
+       } else {
+               if (n_vec < 2 || iov[0].iov_len != 4)
+                       return -EIO;
+               i = 1; /* skip rfc1002 length */
+       }
+
+       for (; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
                        cifs_dbg(VFS, "null iovec entry\n");
                        return -EIO;
                }
-               if (i == 1 && iov[1].iov_len <= 4)
-                       break; /* nothing to sign or corrupt header */
+
                rc = crypto_shash_update(shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
                return rc;
        }
 
-       return __cifs_calc_signature(rqst, 1, server, signature,
+       return __cifs_calc_signature(rqst, server, signature,
                                     &server->secmech.sdescmd5->shash);
 }
 
index 5f0231803431e26e6a32758ec5581feeb6e437de..f3a78efc31094f5cb34de2601744f46b7d9925ab 100644 (file)
@@ -65,8 +65,7 @@ extern struct inode *cifs_root_iget(struct super_block *);
 extern int cifs_create(struct inode *, struct dentry *, umode_t,
                       bool excl);
 extern int cifs_atomic_open(struct inode *, struct dentry *,
-                           struct file *, unsigned, umode_t,
-                           int *);
+                           struct file *, unsigned, umode_t);
 extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
                                  unsigned int);
 extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
index 1efa2e65bc1a8971f01811ac699a82cb7c1f1727..c923c785402757c36d25528c5e77e53909b227dc 100644 (file)
@@ -33,6 +33,9 @@
 
 #define CIFS_MAGIC_NUMBER 0xFF534D42      /* the first four bytes of SMB PDUs */
 
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -312,6 +315,10 @@ struct smb_version_operations {
        /* send echo request */
        int (*echo)(struct TCP_Server_Info *);
        /* create directory */
+       int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+                       umode_t mode, struct cifs_tcon *tcon,
+                       const char *full_path,
+                       struct cifs_sb_info *cifs_sb);
        int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
                     struct cifs_sb_info *);
        /* set info on created directory */
@@ -416,7 +423,7 @@ struct smb_version_operations {
        void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
                                 bool *);
        /* create lease context buffer for CREATE request */
-       char * (*create_lease_buf)(u8 *, u8);
+       char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
        ssize_t (*copychunk_range)(const unsigned int,
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 
 #endif
 
+struct cifs_server_iface {
+       size_t speed;
+       unsigned int rdma_capable : 1;
+       unsigned int rss_capable : 1;
+       struct sockaddr_storage sockaddr;
+};
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -875,6 +889,20 @@ struct cifs_ses {
 #ifdef CONFIG_CIFS_SMB311
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 #endif /* 3.1.1 */
+
+       /*
+        * Network interfaces available on the server this session is
+        * connected to.
+        *
+        * Other channels can be opened by connecting and binding this
+        * session to interfaces from this list.
+        *
+        * iface_lock should be taken when accessing any of these fields
+        */
+       spinlock_t iface_lock;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       unsigned long iface_last_update; /* jiffies */
 };
 
 static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
        return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_fid {
+       bool is_valid:1;        /* Do we have a useable root fid */
+       struct cifs_fid *fid;
+       struct mutex fid_mutex;
+       struct cifs_tcon *tcon;
+       struct work_struct lease_break;
+};
+
 /*
  * there is one of these for each connection to a resource on a particular
  * session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
        struct fscache_cookie *fscache; /* cookie for share */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
-       bool valid_root_fid:1;  /* Do we have a useable root fid */
-       struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
-       struct cifs_fid *prfid; /* handle to the directory at top of share */
+       struct cached_fid crfid; /* Cached root fid */
        /* BB add field for back pointer to sb struct(s)? */
 };
 
@@ -1382,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server,
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
        struct list_head qhead; /* mids waiting on reply from this server */
+       struct kref refcount;
        struct TCP_Server_Info *server; /* server corresponding to this mid */
        __u64 mid;              /* multiplex id */
        __u32 pid;              /* process id */
index 4e0d183c3d1016918d9934420af6e626d128d077..1890f534c88b168b8476a64fd165cce64f905887 100644 (file)
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                        struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern void cifs_delete_mid(struct mid_q_entry *mid);
+extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
@@ -112,10 +113,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */, const int flags,
                        struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
-                         struct kvec *pkvec, int nvec_to_send,
-                         int *pbuftype, const int flags,
-                         struct kvec *presp);
 extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
@@ -544,7 +541,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                           struct cifs_sb_info *cifs_sb,
                           const unsigned char *path, char *pbuf,
                           unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +549,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
 
 int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
                    struct sdesc **sdesc);
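
struct mid_q_entry (see cifsglob.h above) gains a kref, and cifs_mid_q_entry_release() declared here is the matching put; the demultiplex loop in connect.c below drops its reference only after the mid's receive/callback has run, closing a use-after-free window. The release side, sketched against the existing mempool (the real teardown may do more):

static void __example_release_mid(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount,
                                               struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        kref_put(&midEntry->refcount, __example_release_mid);
}
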
index 42329b25877db2b3de349b0ce5723f70bebad92b..93408eab92e78988bcf79b715ac77049db643e7f 100644 (file)
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        }
        spin_unlock(&tcon->open_file_lock);
 
-       mutex_lock(&tcon->prfid_mutex);
-       tcon->valid_root_fid = false;
-       memset(tcon->prfid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_lock(&tcon->crfid.fid_mutex);
+       tcon->crfid.is_valid = false;
+       memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+       mutex_unlock(&tcon->crfid.fid_mutex);
 
        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
@@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
         * greater than cifs socket timeout which is 7 seconds
         */
        while (server->tcpStatus == CifsNeedReconnect) {
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
index 96645a7d8f27144a885863578d33e2b757afeec6..5df2c0698cda7a5ae093db0e3886b275bc0565cb 100644 (file)
@@ -57,9 +57,6 @@
 #include "smb2proto.h"
 #include "smbdirect.h"
 
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
 extern mempool_t *cifs_req_poolp;
 extern bool disable_legacy_dialects;
 
@@ -927,6 +924,7 @@ next_pdu:
                                server->pdu_size = next_offset;
                }
 
+               mid_entry = NULL;
                if (server->ops->is_transform_hdr &&
                    server->ops->receive_transform &&
                    server->ops->is_transform_hdr(buf)) {
@@ -941,8 +939,11 @@ next_pdu:
                                length = mid_entry->receive(server, mid_entry);
                }
 
-               if (length < 0)
+               if (length < 0) {
+                       if (mid_entry)
+                               cifs_mid_q_entry_release(mid_entry);
                        continue;
+               }
 
                if (server->large_buf)
                        buf = server->bigbuf;
@@ -959,6 +960,8 @@ next_pdu:
 
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
+
+                       cifs_mid_q_entry_release(mid_entry);
                } else if (server->ops->is_oplock_break &&
                           server->ops->is_oplock_break(buf, server)) {
                        cifs_dbg(FYI, "Received oplock break\n");
@@ -3029,8 +3032,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 
 #ifdef CONFIG_CIFS_SMB311
        if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
-               if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+               if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
                        tcon->posix_extensions = true;
+                       printk_once(KERN_WARNING
+                               "SMB3.11 POSIX Extensions are experimental\n");
+               }
        }
 #endif /* 311 */
 
index ddae52bd199318ef6d268c337e4cb0fcdd0fe4b3..3713d22b95a7011bda1e8701e5aea54fee96ea2a 100644 (file)
@@ -465,8 +465,7 @@ out_err:
 
 int
 cifs_atomic_open(struct inode *inode, struct dentry *direntry,
-                struct file *file, unsigned oflags, umode_t mode,
-                int *opened)
+                struct file *file, unsigned oflags, umode_t mode)
 {
        int rc;
        unsigned int xid;
@@ -539,9 +538,9 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
        }
 
        if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
-               *opened |= FILE_CREATED;
+               file->f_mode |= FMODE_CREATED;
 
-       rc = finish_open(file, direntry, generic_file_open, opened);
+       rc = finish_open(file, direntry, generic_file_open);
        if (rc) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
index f4697f548a394dbf5c42f731bf13bd529c9aaea0..a2cfb33e85c1f8cb25a2d32a52bb5d60c93b79f1 100644 (file)
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                goto mkdir_out;
        }
 
+       server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+       if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+               rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+                                             cifs_sb);
+               d_drop(direntry); /* for the time being, always refresh inode info */
+               goto mkdir_out;
+       }
+#endif /* SMB311 */
+
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        goto mkdir_out;
        }
 
-       server = tcon->ses->server;
-
        if (!server->ops->mkdir) {
                rc = -ENOSYS;
                goto mkdir_out;
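
cifs_mkdir() now assigns server before the protocol checks so it can prefer server->ops->posix_mkdir when SMB3.11 POSIX extensions were negotiated, falling back to the older paths otherwise. A compact userspace model of that ops-table dispatch (types and names are simplified stand-ins, not the driver's):

#include <stdio.h>

struct ops {
        int (*posix_mkdir)(const char *path, unsigned int mode);
        int (*mkdir)(const char *path, unsigned int mode);
};

static int posix_mkdir_impl(const char *path, unsigned int mode)
{
        printf("posix mkdir %s mode %o\n", path, mode);
        return 0;
}

static int plain_mkdir_impl(const char *path, unsigned int mode)
{
        printf("plain mkdir %s mode %o\n", path, mode);
        return 0;
}

static int do_mkdir(const struct ops *ops, int posix_extensions,
                    const char *path, unsigned int mode)
{
        /* prefer the protocol-level POSIX create when negotiated */
        if (ops->posix_mkdir && posix_extensions)
                return ops->posix_mkdir(path, mode);
        if (!ops->mkdir)
                return -1;      /* -ENOSYS in the real code */
        return ops->mkdir(path, mode);
}

int main(void)
{
        struct ops smb311 = { posix_mkdir_impl, plain_mkdir_impl };
        struct ops smb21  = { NULL, plain_mkdir_impl };

        do_mkdir(&smb311, 1, "/share/dir", 0755);
        do_mkdir(&smb21, 0, "/share/dir", 0755);
        return 0;
}
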
index af29ade195c002c0323d855edb391a155f1620f7..53e8362cbc4a953218d3fbd50f1c7133e5435cc9 100644 (file)
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
+               spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
 }
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
+       kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
 }
 
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
-               mutex_init(&ret_buf->prfid_mutex);
-               ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+               mutex_init(&ret_buf->crfid.fid_mutex);
+               ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+                                            GFP_KERNEL);
 #ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
-       kfree(buf_to_free->prfid);
+       kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
 }
 
index aff8ce8ba34d55485d1d15aa8b7ea498cf6726f3..646dcd149de1e368baebac10a940a70a095ef479 100644 (file)
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
                if (compare_mid(mid->mid, buf) &&
                    mid->mid_state == MID_REQUEST_SUBMITTED &&
                    le16_to_cpu(mid->command) == buf->Command) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
index 788412675723e85589f78cc6056f2d67edbd5ff1..4ed10dd086e6f31f2816462c8a082ec8939175ae 100644 (file)
@@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        int rc;
        __le16 *smb2_path;
        struct smb2_file_all_info *smb2_data = NULL;
-       __u8 smb2_oplock[17];
+       __u8 smb2_oplock;
        struct cifs_fid *fid = oparms->fid;
        struct network_resiliency_req nr_ioctl_req;
 
@@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        }
 
        oparms->desired_access |= FILE_READ_ATTRIBUTES;
-       *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+       smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
 
-       if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
-               memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
-
-       rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL,
+       rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL,
                       NULL);
        if (rc)
                goto out;
@@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                move_smb2_info_to_cifs(buf, smb2_data);
        }
 
-       *oplock = *smb2_oplock;
+       *oplock = smb2_oplock;
 out:
        kfree(smb2_data);
        kfree(smb2_path);
index e2bec47c684580089a70e7914ec71d2f523da3e3..3ff7cec2da81141f67482c57ab03de52aed855ba 100644 (file)
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
 #ifdef CONFIG_CIFS_SMB311
        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
-                cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+                cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+                (from[0] == '/')) {
                start_of_path = from + 1;
        }
 #endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
 {
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
-       int rc;
+       int rc = 0;
 
        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);
+
        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 
                open->oplock = lease_state;
        }
+
        return found;
 }
 
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);
+
+                               if (tcon->crfid.is_valid &&
+                                   !memcmp(rsp->LeaseKey,
+                                           tcon->crfid.fid->lease_key,
+                                           SMB2_LEASE_KEY_SIZE)) {
+                                       INIT_WORK(&tcon->crfid.lease_break,
+                                                 smb2_cached_lease_break);
+                                       queue_work(cifsiod_wq,
+                                                  &tcon->crfid.lease_break);
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       return true;
+                               }
                        }
                }
        }
index b15f5957d64591f0af611670088dd4dd8439fb43..ea92a38b2f08c34f2afd942d5fa933098f04cc07 100644 (file)
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
                if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == shdr->Command)) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
@@ -294,34 +295,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+                       size_t buf_len,
+                       struct cifs_server_iface **iface_list,
+                       size_t *iface_count)
+{
+       struct network_interface_info_ioctl_rsp *p;
+       struct sockaddr_in *addr4;
+       struct sockaddr_in6 *addr6;
+       struct iface_info_ipv4 *p4;
+       struct iface_info_ipv6 *p6;
+       struct cifs_server_iface *info;
+       ssize_t bytes_left;
+       size_t next = 0;
+       int nb_iface = 0;
+       int rc = 0;
+
+       *iface_list = NULL;
+       *iface_count = 0;
+
+       /*
+        * First pass: count and sanity-check
+        */
+
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               nb_iface++;
+               next = le32_to_cpu(p->Next);
+               if (!next) {
+                       bytes_left -= sizeof(*p);
+                       break;
+               }
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (bytes_left || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+       /*
+        * Second pass: extract info to internal structure
+        */
+
+       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+       if (!*iface_list) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       info = *iface_list;
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               info->speed = le64_to_cpu(p->LinkSpeed);
+               info->rdma_capable = le32_to_cpu(p->Capability) & RDMA_CAPABLE;
+               info->rss_capable = le32_to_cpu(p->Capability) & RSS_CAPABLE;
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               switch (p->Family) {
+               /*
+                * The kernel and wire socket structures have the same
+                * layout and use network byte order but make the
+                * conversion explicit in case either one changes.
+                */
+               case INTERNETWORK:
+                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       p4 = (struct iface_info_ipv4 *)p->Buffer;
+                       addr4->sin_family = AF_INET;
+                       memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+                       /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+                       addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+                                &addr4->sin_addr);
+                       break;
+               case INTERNETWORKV6:
+                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       p6 = (struct iface_info_ipv6 *)p->Buffer;
+                       addr6->sin6_family = AF_INET6;
+                       memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+                       /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+                       addr6->sin6_flowinfo = 0;
+                       addr6->sin6_scope_id = 0;
+                       addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+                                &addr6->sin6_addr);
+                       break;
+               default:
+                       cifs_dbg(VFS,
+                                "%s: skipping unsupported socket family\n",
+                                __func__);
+                       goto next_iface;
+               }
+
+               (*iface_count)++;
+               info++;
+next_iface:
+               next = le32_to_cpu(p->Next);
+               if (!next)
+                       break;
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!*iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (rc) {
+               kfree(*iface_list);
+               *iface_count = 0;
+               *iface_list = NULL;
+       }
+       return rc;
+}
+
 static int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
-       struct network_interface_info_ioctl_rsp *out_buf;
+       struct network_interface_info_ioctl_rsp *out_buf = NULL;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        (char **)&out_buf, &ret_data_len);
-       if (rc != 0)
+       if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
-       else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
-               cifs_dbg(VFS, "server returned bad net interface info buf\n");
-               rc = -EINVAL;
-       } else {
-               /* Dump info on first interface */
-               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
-                       le32_to_cpu(out_buf->Capability));
-               cifs_dbg(FYI, "Link Speed %lld\n",
-                       le64_to_cpu(out_buf->LinkSpeed));
+               goto out;
        }
+
+       rc = parse_server_interfaces(out_buf, ret_data_len,
+                                    &iface_list, &iface_count);
+       if (rc)
+               goto out;
+
+       spin_lock(&ses->iface_lock);
+       kfree(ses->iface_list);
+       ses->iface_list = iface_list;
+       ses->iface_count = iface_count;
+       ses->iface_last_update = jiffies;
+       spin_unlock(&ses->iface_lock);
+
+out:
        kfree(out_buf);
        return rc;
 }
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+       struct cached_fid *cfid = container_of(work,
+                               struct cached_fid, lease_break);
+       mutex_lock(&cfid->fid_mutex);
+       if (cfid->is_valid) {
+               cifs_dbg(FYI, "clear cached root file handle\n");
+               SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+                          cfid->fid->volatile_fid);
+               cfid->is_valid = false;
+       }
+       mutex_unlock(&cfid->fid_mutex);
+}
 
 /*
  * Open the directory at the root of a share
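
parse_server_interfaces() above walks records chained by a byte-offset Next field twice: a first pass counts entries and sanity-checks the chain before any allocation, and a second pass copies them out. A self-contained model of the counting pass over a simplified record (endianness conversion elided; the real code applies le32_to_cpu to Next):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified record: real FSCTL_QUERY_NETWORK_INTERFACE_INFO entries
 * carry much more, but the chaining works the same way. */
struct rec {
        uint32_t next;  /* byte offset to the next record, 0 = last */
        uint32_t value;
};

static int count_records(const uint8_t *buf, size_t len)
{
        long left = (long)len;
        const struct rec *p = (const struct rec *)buf;
        int n = 0;

        /* first pass: count and sanity-check before allocating */
        while (left >= (long)sizeof(*p)) {
                n++;
                if (!p->next)
                        break;
                left -= p->next;
                p = (const struct rec *)((const uint8_t *)p + p->next);
        }
        return n;
}

int main(void)
{
        uint8_t buf[32];
        struct rec a = { .next = 16, .value = 1 };
        struct rec b = { .next = 0,  .value = 2 };

        memset(buf, 0, sizeof(buf));
        memcpy(buf, &a, sizeof(a));
        memcpy(buf + 16, &b, sizeof(b));

        printf("%d records\n", count_records(buf, sizeof(buf)));
        return 0;
}
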
@@ -331,13 +489,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        struct cifs_open_parms oparams;
        int rc;
        __le16 srch_path = 0; /* Null - since an open of top of share */
-       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       u8 oplock = SMB2_OPLOCK_LEVEL_II;
 
-       mutex_lock(&tcon->prfid_mutex);
-       if (tcon->valid_root_fid) {
+       mutex_lock(&tcon->crfid.fid_mutex);
+       if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
-               memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
-               mutex_unlock(&tcon->prfid_mutex);
+               memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+               mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }
 
@@ -350,10 +508,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc == 0) {
-               memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
-               tcon->valid_root_fid = true;
+               memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+               tcon->crfid.tcon = tcon;
+               tcon->crfid.is_valid = true;
        }
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
 
@@ -383,9 +542,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        if (rc)
                return;
 
-#ifdef CONFIG_CIFS_STATS2
        SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
 
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +593,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
 
-       if ((*full_path == 0) && tcon->valid_root_fid)
+       if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;
 
        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -699,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea,
                         len);
+       kfree(ea);
+
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 
        return rc;
@@ -2063,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2090,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2128,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        if (lease_key)
-               memcpy(lease_key, &lc->lcontext.LeaseKeyLow,
-                      SMB2_LEASE_KEY_SIZE);
+               memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
        return le32_to_cpu(lc->lcontext.LeaseState);
 }
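
Storing the lease key as a plain u8[16] and memcpy'ing it keeps the wire byte order intact on any host; the old pair of cpu_to_le64(*(u64 *)key) loads reinterpreted the bytes through host endianness first and also assumed the key was aligned. A small demonstration of the difference:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t key[16] = { 0, 1, 2, 3, 4, 5, 6, 7,
                            8, 9, 10, 11, 12, 13, 14, 15 };
        uint8_t wire[16];
        uint64_t lo;

        /* new scheme: bytes go out exactly as generated */
        memcpy(wire, key, sizeof(key));

        /* old scheme: a u64 load whose value depends on host
         * endianness before the cpu_to_le64() conversion */
        memcpy(&lo, key, sizeof(lo));
        printf("first 8 key bytes as a host u64: 0x%016llx\n",
               (unsigned long long)lo);
        printf("wire[0..3]: %02x %02x %02x %02x\n",
               wire[0], wire[1], wire[2], wire[3]);
        return 0;
}
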
 
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
                   struct smb_rqst *old_rq)
 {
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
 
        memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 }
 
 /* Assumes:
- * rqst->rq_iov[0]  is rfc1002 length
- * rqst->rq_iov[1]  is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0]  is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
  */
 static struct scatterlist *
 init_sg(struct smb_rqst *rqst, u8 *sign)
 {
-       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        struct scatterlist *sg;
        unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
                return NULL;
 
        sg_init_table(sg, sg_len);
-       smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
-       for (i = 1; i < rqst->rq_nvec - 1; i++)
-               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
-                                               rqst->rq_iov[i+1].iov_len);
+       smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+       for (i = 1; i < rqst->rq_nvec; i++)
+               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+                                               rqst->rq_iov[i].iov_len);
        for (j = 0; i < sg_len - 1; i++, j++) {
                unsigned int len, offset;
 
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        return 1;
 }
 /*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst has the following format:
+ * iov[0]   - transform header (associated data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leaving iov[0]
  * untouched.
  */
 static int
 crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 {
        struct smb2_transform_hdr *tr_hdr =
-                       (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        int rc = 0;
        struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
        return rc;
 }
 
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
 static int
 smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                       struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        struct page **pages;
        struct smb2_transform_hdr *tr_hdr;
        unsigned int npages = old_rq->rq_npages;
-       unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+       unsigned int orig_len;
        int i;
        int rc = -ENOMEM;
 
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                        goto err_free_pages;
        }
 
-       /* Make space for one extra iov to hold the transform header */
        iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
                            GFP_KERNEL);
        if (!iov)
                goto err_free_pages;
 
-       /* copy all iovs from the old except the 1st one (rfc1002 length) */
-       memcpy(&iov[2], &old_rq->rq_iov[1],
-                               sizeof(struct kvec) * (old_rq->rq_nvec - 1));
-       /* copy the rfc1002 iov */
-       iov[0].iov_base = old_rq->rq_iov[0].iov_base;
-       iov[0].iov_len  = old_rq->rq_iov[0].iov_len;
+       /* copy all iovs from the old */
+       memcpy(&iov[1], &old_rq->rq_iov[0],
+                               sizeof(struct kvec) * old_rq->rq_nvec);
 
        new_rq->rq_iov = iov;
        new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        if (!tr_hdr)
                goto err_free_iov;
 
+       orig_len = smb_rqst_len(server, old_rq);
+
-       /* fill the 2nd iov with a transform header */
+       /* fill the first iov with the transform header */
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
-       new_rq->rq_iov[1].iov_base = tr_hdr;
-       new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
-       /* Update rfc1002 header */
-       inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
-                       sizeof(struct smb2_transform_hdr));
+       new_rq->rq_iov[0].iov_base = tr_hdr;
+       new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
 
        /* copy pages from the old request */
        for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
                put_page(rqst->rq_pages[i]);
        kfree(rqst->rq_pages);
        /* free transform header */
-       kfree(rqst->rq_iov[1].iov_base);
+       kfree(rqst->rq_iov[0].iov_base);
        kfree(rqst->rq_iov);
 }
 
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
                 unsigned int buf_data_size, struct page **pages,
                 unsigned int npages, unsigned int page_data_size)
 {
-       struct kvec iov[3];
+       struct kvec iov[2];
        struct smb_rqst rqst = {NULL};
        int rc;
 
-       iov[0].iov_base = NULL;
-       iov[0].iov_len = 0;
-       iov[1].iov_base = buf;
-       iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-       iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
-       iov[2].iov_len = buf_data_size;
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 3;
+       rqst.rq_nvec = 2;
        rqst.rq_pages = pages;
        rqst.rq_npages = npages;
        rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        if (rc)
                return rc;
 
-       memmove(buf, iov[2].iov_base, buf_data_size);
+       memmove(buf, iov[1].iov_base, buf_data_size);
 
        server->total_read = buf_data_size + page_data_size;
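
With the rfc1002 iov gone, decryption sees exactly two kvecs: the transform header (the AEAD's associated data) and the ciphertext that follows it in the same buffer. A userspace sketch of that layout using struct iovec; the 52-byte header size is assumed here from sizeof(struct smb2_transform_hdr):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define TRANSFORM_HDR_SIZE 52   /* assumed smb2_transform_hdr size */

int main(void)
{
        char buf[256] = { 0 };  /* transform header + encrypted payload */
        struct iovec iov[2];

        /* iov[0]: transform header, the AEAD's associated data */
        iov[0].iov_base = buf;
        iov[0].iov_len  = TRANSFORM_HDR_SIZE;

        /* iov[1]: everything after the header is ciphertext */
        iov[1].iov_base = buf + TRANSFORM_HDR_SIZE;
        iov[1].iov_len  = sizeof(buf) - TRANSFORM_HDR_SIZE;

        printf("header %zu bytes, payload %zu bytes\n",
               iov[0].iov_len, iov[1].iov_len);
        return 0;
}
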
 
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
        .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
+       .posix_mkdir = smb311_posix_mkdir,
        .rmdir = smb2_rmdir,
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
index af032e1a3eac7adaf0570f5923e0ba6164e8ed6b..3c92678cb45bc8fab4ce27cfcbadaef43586a3e9 100644 (file)
@@ -155,7 +155,7 @@ out:
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 {
-       int rc = 0;
+       int rc;
        struct nls_table *nls_codepage;
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
         * for those three - in the calling routine.
         */
        if (tcon == NULL)
-               return rc;
+               return 0;
 
        if (smb2_command == SMB2_TREE_CONNECT)
-               return rc;
+               return 0;
 
        if (tcon->tidStatus == CifsExiting) {
                /*
@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
                        return -EAGAIN;
                }
 
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
        }
 
        if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
-               return rc;
+               return 0;
 
        nls_codepage = load_nls_default();
 
@@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
                return rc;
 
        /* BB eventually switch this to SMB2 specific small buf size */
-       *request_buf = cifs_small_buf_get();
+       if (smb2_command == SMB2_SET_INFO)
+               *request_buf = cifs_buf_get();
+       else
+               *request_buf = cifs_small_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
                return -ENOMEM;
@@ -602,6 +611,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
 int
 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
@@ -673,7 +683,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
        /*
@@ -990,8 +1004,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        req->PreviousSessionId = sess_data->previous_session;
 
        req->Flags = 0; /* MBZ */
-       /* to enable echos and oplocks */
-       req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+       /* enough to enable echoes and oplocks, plus one max-size write */
+       req->sync_hdr.CreditRequest = cpu_to_le16(130);
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (server->sign)
@@ -1027,6 +1042,7 @@ static int
 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
 {
        int rc;
+       struct smb_rqst rqst;
        struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
        struct kvec rsp_iov = { NULL, 0 };
 
@@ -1035,10 +1051,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
                cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
        req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-       /* BB add code to build os and lm fields */
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = sess_data->iov;
+       rqst.rq_nvec = 2;
 
-       rc = smb2_send_recv(sess_data->xid, sess_data->ses,
-                           sess_data->iov, 2,
+       /* BB add code to build os and lm fields */
+       rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+                           &rqst,
                            &sess_data->buf0_type,
                            CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
        cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1395,7 @@ out:
 int
 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;
@@ -1413,7 +1433,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        /*
         * No tcon so can't do
@@ -1443,6 +1467,7 @@ int
 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -1499,7 +1524,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
            !smb3_encryption_required(tcon))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
 
@@ -1563,6 +1592,7 @@ tcon_error_exit:
 int
 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1623,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
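
Every converted call site above repeats the same idiom: zero a struct smb_rqst on the stack, point rq_iov/rq_nvec at the existing kvec array, and hand it to cifs_send_recv(), replacing the removed smb2_send_recv() wrapper. A condensed userspace model of the idiom with simplified types:

#include <stdio.h>
#include <string.h>

struct kvec { void *iov_base; size_t iov_len; };

struct smb_rqst {
        struct kvec *rq_iov;    /* request vector */
        unsigned int rq_nvec;   /* number of entries in rq_iov */
        /* page fields elided */
};

/* stand-in for cifs_send_recv() */
static int send_recv(const struct smb_rqst *rqst)
{
        size_t total = 0;
        unsigned int i;

        for (i = 0; i < rqst->rq_nvec; i++)
                total += rqst->rq_iov[i].iov_len;
        printf("sending %u iovs, %zu bytes\n", rqst->rq_nvec, total);
        return 0;
}

int main(void)
{
        char req[64] = { 0 };
        struct kvec iov[1] = { { req, sizeof(req) } };
        struct smb_rqst rqst;

        /* the idiom each call site now repeats */
        memset(&rqst, 0, sizeof(rqst));
        rqst.rq_iov = iov;
        rqst.rq_nvec = 1;

        return send_recv(&rqst);
}
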
@@ -1682,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
 
 static int
 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
-                 unsigned int *num_iovec, __u8 *oplock)
+                 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
 {
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
+       iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = server->vals->create_lease_size;
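
add_lease_context() now takes the lease key as an explicit parameter instead of reading it from oplock+1, where callers had to smuggle the 16-byte key into a 17-byte oplock buffer (compare the smb2_open_file hunk earlier, which shrinks smb2_oplock to a single byte). A sketch contrasting the two calling conventions, with illustrative names only:

#include <stdio.h>
#include <string.h>

#define LEASE_KEY_SIZE 16

/* old convention: key piggybacked after the oplock byte */
static void lease_ctx_old(const unsigned char *oplock_buf)
{
        unsigned char oplock = oplock_buf[0];
        const unsigned char *key = oplock_buf + 1;      /* fragile */

        printf("old: oplock %u, key[0] %u\n", oplock, key[0]);
}

/* new convention: the key is its own parameter */
static void lease_ctx_new(const unsigned char *lease_key,
                          unsigned char oplock)
{
        printf("new: oplock %u, key[0] %u\n", oplock, lease_key[0]);
}

int main(void)
{
        unsigned char buf[1 + LEASE_KEY_SIZE] = { 9 /* oplock */ };
        unsigned char key[LEASE_KEY_SIZE] = { 7 };

        memcpy(buf + 1, key, sizeof(key));
        lease_ctx_old(buf);
        lease_ctx_new(key, 9);
        return 0;
}
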
@@ -1886,11 +1920,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
        return 0;
 }
 
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb)
+{
+       struct smb_rqst rqst;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses = tcon->ses;
+       struct kvec iov[3]; /* at least one kvec for each open context */
+       struct kvec rsp_iov = {NULL, 0};
+       int resp_buftype;
+       int uni_path_len;
+       __le16 *copy_path = NULL;
+       int copy_size;
+       int rc = 0;
+       unsigned int n_iov = 2;
+       __u32 file_attributes = 0;
+       char *pc_buf = NULL;
+       int flags = 0;
+       unsigned int total_len;
+       __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+       if (!path)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "mkdir\n");
+
+       if (ses && (ses->server))
+               server = ses->server;
+       else
+               return -EIO;
+
+       rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
+       if (rc)
+               return rc;
+
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+
+       req->ImpersonationLevel = IL_IMPERSONATION;
+       req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+       /* File attributes ignored on open (used in create though) */
+       req->FileAttributes = cpu_to_le32(file_attributes);
+       req->ShareAccess = FILE_SHARE_ALL_LE;
+       req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+       req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+       iov[0].iov_base = (char *)req;
+       /* -1 since last byte is buf[0] which is sent below (path) */
+       iov[0].iov_len = total_len - 1;
+
+       req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+       /* [MS-SMB2] 2.2.13 NameOffset:
+        * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+        * the SMB2 header, the file name includes a prefix that will
+        * be processed during DFS name normalization as specified in
+        * section 3.3.5.9. Otherwise, the file name is relative to
+        * the share that is identified by the TreeId in the SMB2
+        * header.
+        */
+       if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+               int name_len;
+
+               req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+               rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+                                                &name_len,
+                                                tcon->treeName, path);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       return rc;
+               }
+               req->NameLength = cpu_to_le16(name_len * 2);
+               uni_path_len = copy_size;
+               path = copy_path;
+       } else {
+               uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+               /* NameLength MUST be 0 when opening the root of the share */
+               req->NameLength = cpu_to_le16(uni_path_len - 2);
+               if (uni_path_len % 8 != 0) {
+                       copy_size = roundup(uni_path_len, 8);
+                       copy_path = kzalloc(copy_size, GFP_KERNEL);
+                       if (!copy_path) {
+                               cifs_small_buf_release(req);
+                               return -ENOMEM;
+                       }
+                       memcpy((char *)copy_path, (const char *)path,
+                              uni_path_len);
+                       uni_path_len = copy_size;
+                       path = copy_path;
+               }
+       }
+
+       iov[1].iov_len = uni_path_len;
+       iov[1].iov_base = path;
+       req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (tcon->posix_extensions) {
+               if (n_iov > 2) {
+                       struct create_context *ccontext =
+                           (struct create_context *)iov[n_iov-1].iov_base;
+                       ccontext->Next =
+                               cpu_to_le32(iov[n_iov-1].iov_len);
+               }
+
+               rc = add_posix_context(iov, &n_iov, mode);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+                       return rc;
+               }
+               pc_buf = iov[n_iov-1].iov_base;
+       }
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+                           &rsp_iov);
+
+       cifs_small_buf_release(req);
+       rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+       if (rc != 0) {
+               cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+               goto smb311_mkdir_exit;
+       } else {
+               trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId,
+                                           tcon->tid, ses->Suid,
+                                           CREATE_NOT_FILE,
+                                           FILE_WRITE_ATTRIBUTES);
+       }
+
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+       /* Eventually save off POSIX-specific response info and timestamps */
+
+smb311_mkdir_exit:
+       kfree(copy_path);
+       kfree(pc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+}
+#endif /* SMB311 */
+
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
 {
+       struct smb_rqst rqst;
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
@@ -1993,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
        else {
-               rc = add_lease_context(server, iov, &n_iov, oplock);
+               rc = add_lease_context(server, iov, &n_iov,
+                                      oparms->fid->lease_key, oplock);
                if (rc) {
                        cifs_small_buf_release(req);
                        kfree(copy_path);
@@ -2043,7 +2232,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 #endif /* SMB311 */
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2292,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           char *in_data, u32 indatalen,
           char **out_data, u32 *plen /* returned data len */)
 {
+       struct smb_rqst rqst;
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct cifs_ses *ses;
@@ -2189,7 +2383,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2472,7 @@ int
 SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, int flags)
 {
+       struct smb_rqst rqst;
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2500,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
 
@@ -2387,6 +2590,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u32 additional_info, size_t output_len, size_t min_len, void **data,
                u32 *dlen)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -2427,7 +2631,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
 
@@ -2594,11 +2802,10 @@ SMB2_echo(struct TCP_Server_Info *server)
 {
        struct smb2_echo_req *req;
        int rc = 0;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { .rq_iov = iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        cifs_dbg(FYI, "In echo request\n");
 
@@ -2614,11 +2821,8 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        req->sync_hdr.CreditRequest = cpu_to_le16(1);
 
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len;
+       iov[0].iov_base = (char *)req;
 
        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
                             server, CIFS_ECHO_OP);
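
With callers no longer supplying an rfc1002 iov, the 4-byte length marker is produced once at the transport layer; for a session message it is a zero type byte plus the 24-bit big-endian payload length, which htonl() of the length yields directly as long as the length fits in 24 bits. A minimal sketch of that computation (userspace, using arpa/inet.h):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Build the 4-byte RFC 1002 session message header for an SMB2 PDU. */
static uint32_t rfc1002_marker(uint32_t total_len)
{
        return htonl(total_len);
}

int main(void)
{
        uint32_t marker = rfc1002_marker(200);
        uint8_t *b = (uint8_t *)&marker;

        /* first byte is the zero type, the rest is the length */
        printf("marker bytes on the wire: %02x %02x %02x %02x\n",
               b[0], b[1], b[2], b[3]);
        return 0;
}
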
@@ -2633,6 +2837,7 @@ int
 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
 {
+       struct smb_rqst rqst;
        struct smb2_flush_req *req;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
@@ -2660,7 +2865,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc != 0) {
@@ -2848,10 +3057,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
        struct smb2_sync_hdr *shdr;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        struct TCP_Server_Info *server;
        unsigned int total_len;
-       __be32 req_len;
 
        cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
                 __func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3090,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (smb3_encryption_required(io_parms.tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       req_len = cpu_to_be32(total_len);
-
-       rdata->iov[0].iov_base = &req_len;
-       rdata->iov[0].iov_len = sizeof(__be32);
-       rdata->iov[1].iov_base = buf;
-       rdata->iov[1].iov_len = total_len;
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = total_len;
 
        shdr = (struct smb2_sync_hdr *)buf;
 
@@ -2926,6 +3130,7 @@ int
 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
 {
+       struct smb_rqst rqst;
        int resp_buftype, rc = -EACCES;
        struct smb2_read_plain_req *req = NULL;
        struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3151,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3271,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct smb2_sync_hdr *shdr;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
        if (rc) {
@@ -3137,15 +3345,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
                v1->length = cpu_to_le32(wdata->mr->mr->length);
        }
 #endif
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len - 1;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len - 1;
+       iov[0].iov_base = (char *)req;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 2;
+       rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_offset = wdata->page_offset;
        rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3357,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_tailsz = wdata->tailsz;
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr) {
-               iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+               iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
                rqst.rq_npages = 0;
        }
 #endif
@@ -3210,6 +3414,7 @@ int
 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3456,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_vec + 1;
+
+       rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3532,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
 {
+       struct smb_rqst rqst;
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -3395,7 +3605,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
 
@@ -3454,6 +3668,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
               u8 info_type, u32 additional_info, unsigned int num,
                void **data, unsigned int *size)
 {
+       struct smb_rqst rqst;
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
@@ -3509,9 +3724,13 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                iov[i].iov_len = size[i];
        }
 
-       rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = num;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
-       cifs_small_buf_release(req);
+       cifs_buf_release(req);
        rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
 
        if (rc != 0) {
@@ -3664,6 +3883,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3912,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
@@ -3755,6 +3979,7 @@ int
 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3773,7 +3998,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4027,7 @@ int
 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, int level)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3829,7 +4059,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4102,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
@@ -3900,7 +4135,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_len = count;
 
        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
-       rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -3934,6 +4174,7 @@ int
 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_lease_ack *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4205,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
index a345560001ced354c550d6ab2f507a18d72ff9d2..a671adcc44a6c8c6d460585c9b2c8d6b546fc015 100644 (file)
@@ -678,16 +678,14 @@ struct create_context {
 #define SMB2_LEASE_KEY_SIZE 16
 
 struct lease_context {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
 } __packed;
 
 struct lease_context_v2 {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
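
Storing the lease key as one opaque 16-byte array matches the wire layout directly and spares callers the split into two little-endian halves. A sketch of the difference, assuming a caller that keeps the key in a 16-byte buffer (as struct cifs_fid does); both fragments are schematic:

	/* before: two explicit halves, with endianness juggling */
	buf->lcontext.LeaseKeyLow  = cpu_to_le64(*(u64 *)lease_key);
	buf->lcontext.LeaseKeyHigh = cpu_to_le64(*(u64 *)(lease_key + 8));

	/* after: one opaque copy */
	memcpy(buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
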
@@ -851,8 +849,11 @@ struct validate_negotiate_info_rsp {
        __le16 Dialect; /* Dialect in use for the connection */
 } __packed;
 
-#define RSS_CAPABLE    0x00000001
-#define RDMA_CAPABLE   0x00000002
+#define RSS_CAPABLE    cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE   cpu_to_le32(0x00000002)
+
+#define INTERNETWORK   cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
 
 struct network_interface_info_ioctl_rsp {
        __le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +861,21 @@ struct network_interface_info_ioctl_rsp {
        __le32 Capability; /* RSS or RDMA Capable */
        __le32 Reserved;
        __le64 LinkSpeed;
-       char    SockAddr_Storage[128];
+       __le16 Family;
+       __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+       __be16 Port;
+       __be32 IPv4Address;
+       __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8   IPv6Address[16];
+       __be32 ScopeId;
 } __packed;
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
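
Breaking the opaque 128-byte SockAddr_Storage into an explicit Family field plus per-family payload structs lets interface enumeration dispatch without casting through sockaddr_storage. A sketch of how one response entry is meant to be read under these definitions (the surrounding walk loop and variable names are hypothetical):

	struct network_interface_info_ioctl_rsp *p = rsp_buf; /* hypothetical */

	if (p->Family == INTERNETWORK) {
		struct iface_info_ipv4 *a = (struct iface_info_ipv4 *)p->Buffer;
		/* a->Port and a->IPv4Address are big-endian, as on the wire */
	} else if (p->Family == INTERNETWORKV6) {
		struct iface_info_ipv6 *a = (struct iface_info_ipv6 *)p->Buffer;
		/* a->IPv6Address is the raw 16-byte address */
	}
	/* p->Next is the byte offset of the next entry, zero on the last */
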
index c84020057bd816c31a69fd173746374c5c224c8a..6e6a4f2ec890dc0f0ae02b53c9326ae379b02bf7 100644 (file)
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb, bool set_alloc);
 extern int smb2_set_file_info(struct inode *inode, const char *full_path,
                              FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb);
 extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *name, struct cifs_sb_info *cifs_sb);
 extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
 extern void smb2_reconnect_server(struct work_struct *work);
 extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+                                 struct smb_rqst *rqst);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index 349d5ccf854c26999ed8554f6d19cf64a89a33a3..719d55e63d88fe9efc307d16813ffe3f7b9d6762 100644 (file)
@@ -171,10 +171,10 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
        unsigned char *sigptr = smb2_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -192,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        }
 
        rc = crypto_shash_setkey(server->secmech.hmacsha256,
-               ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
+                                ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
                return rc;
        }
 
-       rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init sha256", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index,  server, sigptr,
-               &server->secmech.sdeschmacsha256->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain a rfc1002 length.
+        *
+        * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
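
The shallow copy is the trick here (and again in smb3_calc_signature() below): drqst shares the caller's iov array, but after rq_iov++/rq_nvec-- it presents a view starting at the protocol header, which is why __cifs_calc_signature() loses its iov_hdr_index parameter. When a 4-byte rfc1002 vector is present, it is folded into the hash by hand first. Schematically:

	/* rqst:  [rfc1002 len][smb2 hdr + body][data vectors...]
	 * drqst:              [smb2 hdr + body][data vectors...]
	 */
	drqst = *rqst;          /* struct copy; the iov array is shared */
	crypto_shash_update(shash, iov[0].iov_base, iov[0].iov_len);
	drqst.rq_iov++;         /* window now begins at the header */
	drqst.rq_nvec--;
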
@@ -410,14 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses)
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
-       int rc = 0;
+       int rc;
        unsigned char smb3_signature[SMB2_CMACAES_SIZE];
        unsigned char *sigptr = smb3_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -429,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
 
        rc = crypto_shash_setkey(server->secmech.cmacaes,
-               ses->smb3signingkey, SMB2_CMACAES_SIZE);
-
+                                ses->smb3signingkey, SMB2_CMACAES_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
                return rc;
@@ -441,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
         * so unlike smb2 case we do not have to check here if secmech are
         * initialized
         */
-       rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
-                                  &server->secmech.sdesccmacaes->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain a rfc1002 length.
+        *
+        * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
@@ -462,7 +497,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
 
        if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
            server->tcpStatus == CifsNeedNegotiate)
@@ -552,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = le64_to_cpu(shdr->MessageId);
        temp->pid = current->pid;
        temp->command = shdr->Command; /* Always LE */
@@ -635,7 +671,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +692,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(server, shdr);
index e459c97151b34e684dc3f3cbbc36772fee5aaee5..c55ea4e6201bbf08041968e483ae26d0183a5f3a 100644 (file)
@@ -18,6 +18,7 @@
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "smb2proto.h"
 
 static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
@@ -2082,12 +2083,13 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
  * rqst: the data to write
  * return value: 0 on successful write, otherwise error code
  */
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
+       struct smbd_connection *info = server->smbd_conn;
        struct kvec vec;
        int nvecs;
        int size;
-       unsigned int buflen = 0, remaining_data_length;
+       unsigned int buflen, remaining_data_length;
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2113,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
                return -EINVAL;
        }
-       iov = &rqst->rq_iov[1];
-
-       /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec-1; i++) {
-               buflen += iov[i].iov_len;
-       }
 
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       if (rqst->rq_npages) {
-               if (rqst->rq_npages == 1)
-                       buflen += rqst->rq_tailsz;
-               else
-                       buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
-                                       rqst->rq_offset + rqst->rq_tailsz;
-       }
+       buflen = smb_rqst_len(server, rqst);
 
        if (buflen + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
@@ -2139,6 +2129,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                goto done;
        }
 
+       iov = &rqst->rq_iov[1];
+
        cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
        for (i = 0; i < rqst->rq_nvec-1; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
index 1e419c21dc60527c753747bee44625cafdc7ca3d..a11096254f2965d02478132af55e9ccf6613c578 100644 (file)
@@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
 
 enum mr_state {
        MR_READY,
@@ -332,7 +332,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct smbd_connection *info) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
index 61e74d455d90625339591a3b47560f5bdb50c343..67e413f6ee4d8fd1dbd1eede0a7b0a9e6442a9e3 100644 (file)
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
        TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
 
 DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
 
 DECLARE_EVENT_CLASS(smb3_open_done_class,
        TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
        TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
 
 #endif /* _CIFS_TRACE_H */
 
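
DEFINE_SMB3_OPEN_ERR_EVENT() and DEFINE_SMB3_OPEN_DONE_EVENT() expand to tracepoints named after their argument, so the new smb311_posix_mkdir() path (declared in the smb2proto.h hunk above) reports through the existing open-event classes. A sketch of the expected call shape; the argument values shown are hypothetical:

	if (rc)
		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
					   CREATE_NOT_FILE,
					   FILE_WRITE_ATTRIBUTES, rc);
	else
		trace_smb3_posix_mkdir_done(xid, fid, tcon->tid, ses->Suid,
					    CREATE_NOT_FILE,
					    FILE_WRITE_ATTRIBUTES);
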
index 1f1a68f8911001bae86976171e44a09402982d92..a341ec839c83de8ba9b9a10bb31f3b7ce8d45e8f 100644 (file)
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
        return temp;
 }
 
+static void _cifs_mid_q_entry_release(struct kref *refcount)
+{
+       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
+                                              refcount);
+
+       mempool_free(mid, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
 void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
-       mempool_free(midEntry, cifs_mid_poolp);
+       cifs_mid_q_entry_release(midEntry);
 }
 
 void
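
The kref gives mid_q_entry a real lifetime: the mempool slot is returned only when the last holder drops its reference, and DeleteMidQEntry() becomes just one of the put paths. The put runs under GlobalMid_Lock, presumably so it cannot race with code that finds the mid on a list and takes a fresh reference. Intended usage, with the call sites sketched rather than quoted:

	kref_init(&mid->refcount);          /* allocation holds reference 1 */
	...
	kref_get(&mid->refcount);           /* e.g. while a response is pending */
	...
	cifs_mid_q_entry_release(mid);      /* final put -> mempool_free() */
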
@@ -201,15 +217,25 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
        return 0;
 }
 
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        unsigned int i;
-       struct kvec *iov = rqst->rq_iov;
+       struct kvec *iov;
+       int nvec;
        unsigned long buflen = 0;
 
+       if (server->vals->header_preamble_size == 0 &&
+           rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+               iov = &rqst->rq_iov[1];
+               nvec = rqst->rq_nvec - 1;
+       } else {
+               iov = rqst->rq_iov;
+               nvec = rqst->rq_nvec;
+       }
+
        /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec; i++)
+       for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;
 
        /*
@@ -236,70 +262,88 @@ rqst_len(struct smb_rqst *rqst)
 }
 
 static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+               struct smb_rqst *rqst)
 {
-       int rc;
-       struct kvec *iov = rqst->rq_iov;
-       int n_vec = rqst->rq_nvec;
-       unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
-       unsigned long send_length;
-       unsigned int i;
+       int rc = 0;
+       struct kvec *iov;
+       int n_vec;
+       unsigned int send_length = 0;
+       unsigned int i, j;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
+       __be32 rfc1002_marker;
+
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
-               rc = smbd_send(server->smbd_conn, rqst);
+               rc = smbd_send(server, rqst);
                goto smbd_done;
        }
        if (ssocket == NULL)
                return -ENOTSOCK;
 
-       /* sanity check send length */
-       send_length = rqst_len(rqst);
-       if (send_length != smb_buf_length + 4) {
-               WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
-                       send_length, smb_buf_length);
-               return -EIO;
-       }
-
-       if (n_vec < 2)
-               return -EIO;
-
-       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
-       dump_smb(iov[0].iov_base, iov[0].iov_len);
-       dump_smb(iov[1].iov_base, iov[1].iov_len);
-
        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       size = 0;
-       for (i = 0; i < n_vec; i++)
-               size += iov[i].iov_len;
+       for (j = 0; j < num_rqst; j++)
+               send_length += smb_rqst_len(server, &rqst[j]);
+       rfc1002_marker = cpu_to_be32(send_length);
 
-       iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+       /* Generate a rfc1002 marker for SMB2+ */
+       if (server->vals->header_preamble_size == 0) {
+               struct kvec hiov = {
+                       .iov_base = &rfc1002_marker,
+                       .iov_len  = 4
+               };
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+                             1, 4);
+               rc = smb_send_kvec(server, &smb_msg, &sent);
+               if (rc < 0)
+                       goto uncork;
 
-       rc = smb_send_kvec(server, &smb_msg, &sent);
-       if (rc < 0)
-               goto uncork;
+               total_len += sent;
+               send_length += 4;
+       }
 
-       total_len += sent;
+       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 
-       /* now walk the page array and send each page in it */
-       for (i = 0; i < rqst->rq_npages; i++) {
-               struct bio_vec bvec;
+       for (j = 0; j < num_rqst; j++) {
+               iov = rqst[j].rq_iov;
+               n_vec = rqst[j].rq_nvec;
+
+               size = 0;
+               for (i = 0; i < n_vec; i++) {
+                       dump_smb(iov[i].iov_base, iov[i].iov_len);
+                       size += iov[i].iov_len;
+               }
 
-               bvec.bv_page = rqst->rq_pages[i];
-               rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+                             iov, n_vec, size);
 
-               iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
-                             &bvec, 1, bvec.bv_len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
-                       break;
+                       goto uncork;
 
                total_len += sent;
+
+               /* now walk the page array and send each page in it */
+               for (i = 0; i < rqst[j].rq_npages; i++) {
+                       struct bio_vec bvec;
+
+                       bvec.bv_page = rqst[j].rq_pages[i];
+                       rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+                                            &bvec.bv_offset);
+
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                                     &bvec, 1, bvec.bv_len);
+                       rc = smb_send_kvec(server, &smb_msg, &sent);
+                       if (rc < 0)
+                               break;
+
+                       total_len += sent;
+               }
        }
 
 uncork:
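
Taking a num_rqst count is groundwork for request compounding: for dialects without a preamble, several SMB2 PDUs can now travel in a single RFC 1002 frame, with the 4-byte big-endian marker computed here from the summed request lengths instead of being read out of iov[0]. A hypothetical compound send under the new signature:

	struct smb_rqst pair[2];    /* e.g. create + query-info, hypothetical */

	/* ... fill pair[0] and pair[1], each with its own iov array ... */
	rc = __smb_send_rqst(server, 2, pair);  /* one marker frames both PDUs */
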
@@ -308,9 +352,9 @@ uncork:
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+       if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
-                        smb_buf_length + 4, total_len);
+                        send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
@@ -335,7 +379,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        int rc;
 
        if (!(flags & CIFS_TRANSFORM_REQ))
-               return __smb_send_rqst(server, rqst);
+               return __smb_send_rqst(server, 1, rqst);
 
        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
@@ -347,7 +391,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        if (rc)
                return rc;
 
-       rc = __smb_send_rqst(server, &cur_rqst);
+       rc = __smb_send_rqst(server, 1, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
 }
@@ -365,7 +409,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;
 
-       return __smb_send_rqst(server, &rqst);
+       return __smb_send_rqst(server, 1, &rqst);
 }
 
 static int
@@ -730,7 +774,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
-
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;
@@ -766,8 +809,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
-               smb311_update_preauth_hash(ses, rqst->rq_iov+1,
-                                          rqst->rq_nvec-1);
+               smb311_update_preauth_hash(ses, rqst->rq_iov,
+                                          rqst->rq_nvec);
 #endif
 
        if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +855,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
-                       .iov_base = buf,
-                       .iov_len = midQ->resp_buf_size
+                       .iov_base = resp_iov->iov_base,
+                       .iov_len = resp_iov->iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }
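
With the rfc1002 vector gone from rq_iov, the SMB 3.1.1 preauth integrity hash can simply cover the whole iov array on the send side, and on the receive side it hashes the response through resp_iov, i.e. exactly the buffer handed back to the caller. Schematically:

	struct kvec iov = {
		.iov_base = resp_iov->iov_base,
		.iov_len  = resp_iov->iov_len
	};

	/* send: every protocol byte that went out */
	smb311_update_preauth_hash(ses, rqst->rq_iov, rqst->rq_nvec);
	/* receive: the response as returned */
	smb311_update_preauth_hash(ses, &iov, 1);
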
@@ -872,49 +915,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
-              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
-              const int flags, struct kvec *resp_iov)
-{
-       struct smb_rqst rqst;
-       struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
-       int rc;
-       int i;
-       __u32 count;
-       __be32 rfc1002_marker;
-
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
-               new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
-                                       GFP_KERNEL);
-               if (!new_iov)
-                       return -ENOMEM;
-       } else
-               new_iov = s_iov;
-
-       /* 1st iov is an RFC1002 Session Message length */
-       memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
-       count = 0;
-       for (i = 1; i < n_vec + 1; i++)
-               count += new_iov[i].iov_len;
-
-       rfc1002_marker = cpu_to_be32(count);
-
-       new_iov[0].iov_base = &rfc1002_marker;
-       new_iov[0].iov_len = 4;
-
-       memset(&rqst, 0, sizeof(struct smb_rqst));
-       rqst.rq_iov = new_iov;
-       rqst.rq_nvec = n_vec + 1;
-
-       rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
-               kfree(new_iov);
-       return rc;
-}
-
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
index 0e8e5de3c48a56e24a70b0185932644ba92b93b0..34226c20d196cd029eab8df25d7ffc32a48a29fd 100644 (file)
@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
-       bool hashed = !d_unhashed(dentry);
 
-       if (hashed)
-               raw_write_seqcount_begin(&dentry->d_seq);
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       if (hashed)
-               raw_write_seqcount_end(&dentry->d_seq);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -1892,50 +1889,25 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
        spin_lock(&inode->i_lock);
        __d_instantiate(entry, inode);
        WARN_ON(!(inode->i_state & I_NEW));
-       inode->i_state &= ~I_NEW;
+       inode->i_state &= ~I_NEW & ~I_CREATING;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(d_instantiate_new);
 
-/**
- * d_instantiate_no_diralias - instantiate a non-aliased dentry
- * @entry: dentry to complete
- * @inode: inode to attach to this dentry
- *
- * Fill in inode information in the entry.  If a directory alias is found, then
- * return an error (and drop inode).  Together with d_materialise_unique() this
- * guarantees that a directory inode may never have more than one alias.
- */
-int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
-{
-       BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
-       security_d_instantiate(entry, inode);
-       spin_lock(&inode->i_lock);
-       if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
-               spin_unlock(&inode->i_lock);
-               iput(inode);
-               return -EBUSY;
-       }
-       __d_instantiate(entry, inode);
-       spin_unlock(&inode->i_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(d_instantiate_no_diralias);
-
 struct dentry *d_make_root(struct inode *root_inode)
 {
        struct dentry *res = NULL;
 
        if (root_inode) {
                res = d_alloc_anon(root_inode->i_sb);
-               if (res)
+               if (res) {
+                       res->d_flags |= DCACHE_RCUACCESS;
                        d_instantiate(res, root_inode);
-               else
+               } else {
                        iput(root_inode);
+               }
        }
        return res;
 }
index 71fccccf317e8849580457680d6bd6ac3376c170..8c6ab6c95727ef219a9cb02845f1bb3e7046ad4d 100644 (file)
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        /* length of the variable name itself: remove GUID and separator */
        namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
 
-       uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       if (err)
+               goto out;
 
        if (efivar_variable_is_removable(var->var.VendorGuid,
                                         dentry->d_name.name, namelen))
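
The old call discarded any parse failure; checking guid_parse()'s return value means a variable name with a malformed GUID suffix now fails creation instead of silently yielding a garbage VendorGuid. The name layout being parsed is roughly:

	/* "<VariableName>-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
	 *                 ^ name + namelen + 1: start of the textual GUID
	 */
	err = guid_parse(dentry->d_name.name + namelen + 1,
			 &var->var.VendorGuid);
	if (err)
		goto out;
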
index ceb1031f1cac948e74a970f02058cfeb52d7a351..08d3bd602f73d8f219ee1f259c0cbaa839245c56 100644 (file)
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
-       struct eventfd_ctx *ctx = file->private_data;
-
-       return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
 {
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;
 
+       poll_wait(file, &ctx->wqh, wait);
+
        /*
         * All writes to ctx->count occur within ctx->wqh.lock.  This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= (EPOLLIN & eventmask);
+               events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= (EPOLLOUT & eventmask);
+               events |= EPOLLOUT;
 
        return events;
 }
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
        .show_fdinfo    = eventfd_show_fdinfo,
 #endif
        .release        = eventfd_release,
-       .get_poll_head  = eventfd_get_poll_head,
-       .poll_mask      = eventfd_poll_mask,
+       .poll           = eventfd_poll,
        .read           = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
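
This (and the eventpoll hunk that follows) reverts the short-lived get_poll_head()/poll_mask() split back to the classic ->poll() contract: a single callback that both registers on the wait queue via poll_wait() and computes the ready mask in one pass. The generic shape these hunks return to, as a sketch not tied to eventfd (all foo_* names hypothetical):

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_ctx *ctx = file->private_data;   /* hypothetical */
	__poll_t events = 0;

	poll_wait(file, &ctx->wqh, wait);   /* register for wakeups */

	if (foo_has_data(ctx))              /* hypothetical predicates */
		events |= EPOLLIN;
	if (foo_has_space(ctx))
		events |= EPOLLOUT;
	return events;
}
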
index ea4436f409fb005a16edeca3f49f29f955db0171..67db22fe99c5ce8bf0ba606c0a45f221cbf69b38 100644 (file)
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
        return 0;
 }
 
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
-               __poll_t eventmask)
-{
-       struct eventpoll *ep = file->private_data;
-       return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
        struct eventpoll *ep = file->private_data;
        int depth = 0;
 
+       /* Insert inside our poll wait queue */
+       poll_wait(file, &ep->poll_wait, wait);
+
        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
        .show_fdinfo    = ep_show_fdinfo,
 #endif
        .release        = ep_eventpoll_release,
-       .get_poll_head  = ep_eventpoll_get_poll_head,
-       .poll_mask      = ep_eventpoll_poll_mask,
+       .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
 };
 
index 2d4e0075bd2457c83f9109d5a29365de61658840..bdd0eacefdf575b1351b5d00bf5d7d1bb05b50a1 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -290,15 +290,15 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;
 
-       bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
+       vma_set_anonymous(vma);
 
        if (down_write_killable(&mm->mmap_sem)) {
                err = -EINTR;
                goto err_free;
        }
-       vma->vm_mm = mm;
 
        /*
         * Place the stack at the largest stack address the architecture
@@ -311,7 +311,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        err = insert_vm_struct(mm, vma);
        if (err)
@@ -326,7 +325,7 @@ err:
        up_write(&mm->mmap_sem);
 err_free:
        bprm->vma = NULL;
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return err;
 }
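
vm_area_alloc()/vm_area_free() centralize VMA construction, which is why the open-coded kmem_cache_zalloc(), the vma->vm_mm assignment and the anon_vma_chain initialization all drop out of this function. The resulting allocation pattern, sketched:

	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);   /* zeroed, with vm_mm and anon_vma_chain set */
	if (!vma)
		return -ENOMEM;
	vma_set_anonymous(vma);    /* NULL vm_ops marks the mapping anonymous */
	...
	vm_area_free(vma);         /* error path */
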
 
index cc40802ddfa856d14aefc8ef75ec9e61b89864b0..00e759f051619cfd37a58108265bc9f798554a21 100644 (file)
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
                              unsigned long);
 extern unsigned long ext2_count_free_blocks (struct super_block *);
 extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
 extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
                                                    unsigned int block_group,
                                                    struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
 extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
 extern void ext2_free_inode (struct inode *);
 extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
 extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
index 6484199b35d1ec1bb63879593e5bb20f0af2bb77..5c3d7b7e49755ccbe22f65b607e8197d854df091 100644 (file)
@@ -611,8 +611,7 @@ fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        return ERR_PTR(err);
 
 fail:
index 152453a9187763a7173c2d1c41f33714c5287db9..0c26dcc5d85014d57c6f73d153af9314c69e9ca9 100644 (file)
@@ -45,8 +45,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
                return 0;
        }
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        return err;
 }
 
@@ -192,8 +191,7 @@ out:
 
 out_fail:
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput (inode);
+       discard_new_inode(inode);
        goto out;
 }
 
@@ -261,8 +259,7 @@ out:
 out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
 out_dir:
        inode_dec_link_count(dir);
        goto out;
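
discard_new_inode() is the single-call teardown for an inode that failed mid-creation: it behaves like unlock_new_inode() followed by iput(), but together with the d_instantiate_new() change above (which now also clears I_CREATING on success) it keeps concurrent lookups from ever seeing the half-built inode. The two exits of a create path now look roughly like:

	/* success: publish the inode */
	d_instantiate_new(dentry, inode);   /* clears I_NEW and I_CREATING */

	/* failure: back out without exposing it */
	inode_dec_link_count(inode);
	discard_new_inode(inode);           /* unlock + iput in one step */
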
index 25ab1274090f8532254e783def084bccd24a21c4..8ff53f8da3bcc414fdad44ac3bb76a88258e4d51 100644 (file)
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
                        set_opt (opts->s_mount_opt, NO_UID32);
                        break;
                case Opt_nocheck:
+                       ext2_msg(sb, KERN_WARNING,
+                               "Option nocheck/check=none is deprecated and"
+                               " will be removed in June 2020.");
                        clear_opt (opts->s_mount_opt, CHECK);
                        break;
                case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        new_opts.s_resgid = sbi->s_resgid;
        spin_unlock(&sbi->s_lock);
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, &new_opts))
                return -EINVAL;
 
index b00481c475cb1ea63195ef970bde30af613364c6..aa52d87985aaf30901a52ac6a605357ac6cbea12 100644 (file)
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
-       int flex_bg = 0;
 
        J_ASSERT_BH(bh, buffer_locked(bh));
 
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 
        start = ext4_group_first_block_no(sb, block_group);
 
-       if (ext4_has_feature_flex_bg(sb))
-               flex_bg = 1;
-
        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
-               if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+               if (ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }
 
@@ -372,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
@@ -390,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
@@ -442,7 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                goto verify;
        }
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Block bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
index 0b127853c5845aef5bcfeaa9ec2485f47d7939fb..7c7123f265c25ae9a586877dc30d7b80ede5b62c 100644 (file)
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DIOREAD_NOLOCK      0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM    0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT        0x1000000 /* Journal Async Commit */
+#define EXT4_MOUNT_WARN_ON_ERROR       0x2000000 /* Trigger WARN_ON on error */
 #define EXT4_MOUNT_DELALLOC            0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
        return ino == EXT4_ROOT_INO ||
-               ino == EXT4_USR_QUOTA_INO ||
-               ino == EXT4_GRP_QUOTA_INO ||
-               ino == EXT4_BOOT_LOADER_INO ||
-               ino == EXT4_JOURNAL_INO ||
-               ino == EXT4_RESIZE_INO ||
                (ino >= EXT4_FIRST_INO(sb) &&
                 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode,
 struct iomap;
 extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
 
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
-                                        struct inode *inode,
-                                        int needed);
 extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 
 extern int ext4_convert_inline_data(struct inode *inode);
index 98fb0c119c6827dd50b86ac3521f216bba41170c..adf6668b596f9e20aab3878b791009bfa5f051a0 100644 (file)
@@ -91,6 +91,7 @@ struct ext4_extent_header {
 };
 
 #define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
 
 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
        (sizeof(struct ext4_extent_header) + \
index 0057fe3f248d195736ee58ec40131dadd98d59bb..8ce6fd5b10dd331a9cd86fb41e15ba84095c75e7 100644 (file)
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 
        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
+       if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+               EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+                                depth);
+               ret = -EFSCORRUPTED;
+               goto err;
+       }
 
        if (path) {
                ext4_ext_drop_refs(path);
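
The bound is generous rather than tight: with 4 KiB blocks an index block holds (4096 - 12) / 12 = 340 entries, so four interior levels already address roughly 340^4, about 1.3 x 10^10 blocks, comfortably beyond the 2^32 logical blocks a file can span. Any on-disk header claiming a depth above EXT4_MAX_EXTENT_DEPTH must therefore be corrupt, and rejecting it up front keeps ext4_find_extent() from walking, and allocating a path array for, an implausible attacker-controlled depth.
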
index f525f909b559c8c12e361f0750b56717a972ffb1..f336cbc6e932ee03113ebfa957e9b74ccc42352a 100644 (file)
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        blk = ext4_inode_bitmap(sb, desc);
        if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
                                           EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSBADCRC;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
@@ -150,7 +153,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        }
 
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Inode bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
@@ -994,7 +1006,8 @@ got:
 
                /* recheck and clear flag under lock if we still need to */
                ext4_lock_group(sb, group);
-               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               if (ext4_has_group_desc_csum(sb) &&
+                   (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_group_clusters_set(sb, gdp,
                                ext4_free_clusters_after_init(sb, group, gdp));
@@ -1375,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
                            ext4_itable_unused_count(sb, gdp)),
                            sbi->s_inodes_per_block);
 
-       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+                              ext4_itable_unused_count(sb, gdp)) <
+                             EXT4_FIRST_INO(sb)))) {
                ext4_error(sb, "Something is wrong with group %u: "
                           "used itable blocks: %d; "
                           "itable unused count: %u",
index 285ed1588730c34c892566c0639b048e9c9a017e..3543fe80a3c442364d752fcbb74a7edd4df97dc9 100644 (file)
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
 
        memset((void *)ext4_raw_inode(&is.iloc)->i_block,
                0, EXT4_MIN_INLINE_DATA_SIZE);
+       memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
 
        if (ext4_has_feature_extents(inode->i_sb)) {
                if (S_ISDIR(inode->i_mode) ||
@@ -681,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
                goto convert;
        }
 
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out;
+
        flags |= AOP_FLAG_NOFS;
 
        page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -709,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 out_up_read:
        up_read(&EXT4_I(inode)->xattr_sem);
 out:
-       if (handle)
+       if (handle && (ret != 1))
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
@@ -751,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 
        ext4_write_unlock_xattr(inode, &no_expand);
        brelse(iloc.bh);
+       mark_inode_dirty(inode);
 out:
        return copied;
 }
@@ -886,18 +892,17 @@ retry_journal:
        flags |= AOP_FLAG_NOFS;
 
        if (ret == -ENOSPC) {
+               ext4_journal_stop(handle);
                ret = ext4_da_convert_inline_data_to_extent(mapping,
                                                            inode,
                                                            flags,
                                                            fsdata);
-               ext4_journal_stop(handle);
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
                goto out;
        }
 
-
        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
@@ -915,6 +920,9 @@ retry_journal:
                if (ret < 0)
                        goto out_release_page;
        }
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out_release_page;
 
        up_read(&EXT4_I(inode)->xattr_sem);
        *pagep = page;
@@ -935,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
                                  unsigned len, unsigned copied,
                                  struct page *page)
 {
-       int i_size_changed = 0;
        int ret;
 
        ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -953,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * But it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos+copied > inode->i_size) {
+       if (pos+copied > inode->i_size)
                i_size_write(inode, pos+copied);
-               i_size_changed = 1;
-       }
        unlock_page(page);
        put_page(page);
 
@@ -966,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
-               mark_inode_dirty(inode);
+       mark_inode_dirty(inode);
 
        return copied;
 }
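
Unconditionally dirtying the inode is the point of this pair of hunks: inline data lives in the inode body, so a write only becomes durable once the inode itself is journalled, and the old i_size_changed gate skipped that whenever an overwrite left the size unchanged. The tail of the write-end path is reduced to:

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty(inode);   /* always: the written bytes live in the inode */
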
@@ -1890,42 +1894,6 @@ out:
        return (error < 0 ? error : 0);
 }
 
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
-                                 struct inode *inode,
-                                 int needed)
-{
-       int error;
-       struct ext4_xattr_entry *entry;
-       struct ext4_inode *raw_inode;
-       struct ext4_iloc iloc;
-
-       error = ext4_get_inode_loc(inode, &iloc);
-       if (error)
-               return error;
-
-       raw_inode = ext4_raw_inode(&iloc);
-       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
-                                           EXT4_I(inode)->i_inline_off);
-       if (EXT4_XATTR_LEN(entry->e_name_len) +
-           EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
-               error = -ENOSPC;
-               goto out;
-       }
-
-       error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
-       brelse(iloc.bh);
-       return error;
-}
-
 int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 {
        handle_t *handle;
index 2ea07efbe0165d0d5bbff1cd4a570cb8bc337ae6..4efe77286ecd55a6a1c79e45e61a93eb3554b901 100644 (file)
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
-                                "lblock %lu mapped to illegal pblock "
+                                "lblock %lu mapped to illegal pblock %llu "
                                 "(length %d)", (unsigned long) map->m_lblk,
-                                map->m_len);
+                                map->m_pblk, map->m_len);
                return -EFSCORRUPTED;
        }
        return 0;
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_write_end(inode, pos, len, copied);
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
+       if (i_size_changed || inline_data)
                ext4_mark_inode_dirty(handle, inode);
 
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
 
-       if (size_changed) {
+       if (size_changed || inline_data) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
        }
 
        if (inline_data) {
-               BUFFER_TRACE(inode_bh, "get write access");
-               ret = ext4_journal_get_write_access(handle, inode_bh);
-
-               err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+               ret = ext4_mark_inode_dirty(handle, inode);
        } else {
                ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                             do_journal_get_write_access);
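
In both write-end paths the inline-data status is sampled into a local before ext4_write_inline_data_end() runs, since that call can change it, and the snapshot is then used when deciding whether to dirty the inode; as in the inline.c hunks above, an inline write must mark the inode dirty even when i_size did not move. The shared shape:

	int inline_data = ext4_has_inline_data(inode);  /* sample first */

	if (inline_data)
		ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
	...
	if (size_changed || inline_data)
		ext4_mark_inode_dirty(handle, inode);
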
@@ -4506,7 +4504,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
        int                     inodes_per_block, inode_offset;
 
        iloc->bh = NULL;
-       if (!ext4_valid_inum(sb, inode->i_ino))
+       if (inode->i_ino < EXT4_ROOT_INO ||
+           inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return -EFSCORRUPTED;
 
        iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
index 6eae2b91aafa20b21fd19c61bb15fa7add625935..f7ab340881626be5f28407334c0f25f18717eb75 100644 (file)
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
         * initialize bb_free to be able to skip
         * empty groups without initialization
         */
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                meta_group_info[i]->bb_free =
                        ext4_free_clusters_after_init(sb, group, desc);
        } else {
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 #endif
        ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
                      ac->ac_b_ex.fe_len);
-       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                ext4_free_group_clusters_set(sb, gdp,
                                             ext4_free_clusters_after_init(sb,
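
Both mballoc hunks apply the same gate: bg_flags is only trustworthy when a group-descriptor checksum feature protects it, so EXT4_BG_BLOCK_UNINIT must be ignored on filesystems without descriptor checksums. A compact sketch of the predicate, with a plain bool standing in for ext4_has_group_desc_csum():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXT4_BG_BLOCK_UNINIT 0x0001

    /* Honor BLOCK_UNINIT only when a checksum covers the descriptor;
     * otherwise a corrupted bg_flags word could make us trust a bogus
     * "never initialized" claim for the block bitmap. */
    static bool bg_block_uninit(bool has_group_desc_csum, uint16_t bg_flags)
    {
            return has_group_desc_csum && (bg_flags & EXT4_BG_BLOCK_UNINIT);
    }

    int main(void)
    {
            printf("%d %d\n", bg_block_uninit(true, EXT4_BG_BLOCK_UNINIT),
                              bg_block_uninit(false, EXT4_BG_BLOCK_UNINIT));
            return 0;
    }
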
index 27b9a76a0dfabeff3ee9ec65a15d8d5e711d851a..638ad47434771af1d2c2a90a17003271fe26e5a9 100644 (file)
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
                        goto exit_thread;
                }
 
-               if (sb_rdonly(sb)) {
-                       ext4_warning(sb, "kmmpd being stopped since filesystem "
-                                    "has been remounted as readonly.");
-                       goto exit_thread;
-               }
+               if (sb_rdonly(sb))
+                       break;
 
                diff = jiffies - last_update_time;
                if (diff < mmp_update_interval * HZ)
index 0c4c2201b3aa2ee9680478f8fd11685e66634f50..b7f7922061be89928542176043b4cc78695edb17 100644 (file)
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 
 static void ext4_handle_error(struct super_block *sb)
 {
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (sb_rdonly(sb))
                return;
 
@@ -740,6 +743,9 @@ __acquires(bitlock)
                va_end(args);
        }
 
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
@@ -1371,7 +1377,8 @@ enum {
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
        Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+       Opt_nowarn_on_error, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = {
        {Opt_dax, "dax"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
+       {Opt_warn_on_error, "warn_on_error"},
+       {Opt_nowarn_on_error, "nowarn_on_error"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
@@ -1602,6 +1611,8 @@ static const struct mount_opts {
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
+       {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
+       {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
        {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
+       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (block_bitmap >= sb_block + 1 &&
+                   block_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_bitmap >= sb_block + 1 &&
+                   inode_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_table >= sb_block + 1 &&
+                   inode_table <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3097,6 +3133,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
        ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *gdp = NULL;
 
+       if (!ext4_has_group_desc_csum(sb))
+               return ngroups;
+
        for (group = 0; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp)
@@ -3742,6 +3781,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }
+       if (le32_to_cpu(es->s_log_cluster_size) >
+           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Invalid log cluster size: %u",
+                        le32_to_cpu(es->s_log_cluster_size));
+               goto failed_mount;
+       }
 
        if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                ext4_msg(sb, KERN_ERR,
@@ -3806,6 +3852,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        } else {
                sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
                sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+               if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+                       ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+                                sbi->s_first_ino);
+                       goto failed_mount;
+               }
                if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
                    (!is_power_of_2(sbi->s_inode_size)) ||
                    (sbi->s_inode_size > blocksize)) {
@@ -3882,13 +3933,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 "block size (%d)", clustersize, blocksize);
                        goto failed_mount;
                }
-               if (le32_to_cpu(es->s_log_cluster_size) >
-                   (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-                       ext4_msg(sb, KERN_ERR,
-                                "Invalid log cluster size: %u",
-                                le32_to_cpu(es->s_log_cluster_size));
-                       goto failed_mount;
-               }
                sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
                        le32_to_cpu(es->s_log_block_size);
                sbi->s_clusters_per_group =
@@ -3909,10 +3953,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                }
        } else {
                if (clustersize != blocksize) {
-                       ext4_warning(sb, "fragment/cluster size (%d) != "
-                                    "block size (%d)", clustersize,
-                                    blocksize);
-                       clustersize = blocksize;
+                       ext4_msg(sb, KERN_ERR,
+                                "fragment/cluster size (%d) != "
+                                "block size (%d)", clustersize, blocksize);
+                       goto failed_mount;
                }
                if (sbi->s_blocks_per_group > blocksize * 8) {
                        ext4_msg(sb, KERN_ERR,
@@ -3966,6 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         ext4_blocks_count(es));
                goto failed_mount;
        }
+       if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+           (sbi->s_cluster_ratio == 1)) {
+               ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+                        "block is 0 with a 1k block and cluster size");
+               goto failed_mount;
+       }
+
        blocks_count = (ext4_blocks_count(es) -
                        le32_to_cpu(es->s_first_data_block) +
                        EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -4001,6 +4052,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
 
@@ -4020,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+       sbi->s_gdb_count = db_count;
        if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
        }
 
-       sbi->s_gdb_count = db_count;
-
        timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
        /* Register extent status tree shrinker */
@@ -4736,6 +4794,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 
        if (!sbh || block_device_ejected(sb))
                return error;
+
+       /*
+        * The superblock bh should be mapped, but it might not be if the
+        * device was hot-removed. Not much we can do but fail the I/O.
+        */
+       if (!buffer_mapped(sbh))
+               return error;
+
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -5140,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
                        if (sbi->s_journal)
                                ext4_mark_recovery_complete(sb, es);
+                       if (sbi->s_mmp_tsk)
+                               kthread_stop(sbi->s_mmp_tsk);
                } else {
                        /* Make sure we can mount this feature set readwrite */
                        if (ext4_has_feature_readonly(sb) ||
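
Among the ext4_fill_super() additions above, the inode-count cross-check is the one that needs 64-bit care: s_groups_count * s_inodes_per_group can overflow 32 bits on a large filesystem, so the product is widened before comparing against s_inodes_count. A standalone sketch of that geometry check:

    #include <stdint.h>
    #include <stdio.h>

    /* The total inode count advertised by the superblock must equal the
     * per-group count times the computed group count; widen to u64 so
     * the multiplication cannot wrap before the comparison. */
    static int inodes_count_ok(uint32_t groups_count, uint32_t inodes_per_group,
                               uint32_t s_inodes_count)
    {
            return (uint64_t)groups_count * inodes_per_group == s_inodes_count;
    }

    int main(void)
    {
            printf("%d\n", inodes_count_ok(512, 8192, 512u * 8192u));
            printf("%d\n", inodes_count_ok(513, 8192, 512u * 8192u));
            return 0;
    }
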
index fc4ced59c565b7b8ad2d36af9b8e1894c7fd3029..723df14f408408607c123dbbb7b7f7fe1fe9b396 100644 (file)
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
 {
        int error = -EFSCORRUPTED;
 
-       if (buffer_verified(bh))
-               return 0;
-
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                goto errout;
+       if (buffer_verified(bh))
+               return 0;
+
        error = -EFSBADCRC;
        if (!ext4_xattr_block_csum_verify(inode, bh))
                goto errout;
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
                                handle_t *handle, struct inode *inode,
                                bool is_block)
 {
-       struct ext4_xattr_entry *last;
+       struct ext4_xattr_entry *last, *next;
        struct ext4_xattr_entry *here = s->here;
        size_t min_offs = s->end - s->base, name_len = strlen(i->name);
        int in_inode = i->in_inode;
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 
        /* Compute min_offs and last. */
        last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+       for (; !IS_LAST_ENTRY(last); last = next) {
+               next = EXT4_XATTR_NEXT(last);
+               if ((void *)next >= s->end) {
+                       EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+                       ret = -EFSCORRUPTED;
+                       goto out;
+               }
                if (!last->e_value_inum && last->e_value_size) {
                        size_t offs = le16_to_cpu(last->e_value_offs);
                        if (offs < min_offs)
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
        error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
-       if (error) {
-               if (error == -ENOSPC &&
-                   ext4_has_inline_data(inode)) {
-                       error = ext4_try_to_evict_inline_data(handle, inode,
-                                       EXT4_XATTR_LEN(strlen(i->name) +
-                                       EXT4_XATTR_SIZE(i->value_len)));
-                       if (error)
-                               return error;
-                       error = ext4_xattr_ibody_find(inode, i, is);
-                       if (error)
-                               return error;
-                       error = ext4_xattr_set_entry(i, s, handle, inode,
-                                                    false /* is_block */);
-               }
-               if (error)
-                       return error;
-       }
+       if (error)
+               return error;
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
        if (!IS_LAST_ENTRY(s->first)) {
                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
                last = IFIRST(header);
                /* Find the entry best suited to be pushed into EA block */
                for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+                       /* never move system.data out of the inode */
+                       if ((last->e_name_len == 4) &&
+                           (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+                           !memcmp(last->e_name, "data", 4))
+                               continue;
                        total_size = EXT4_XATTR_LEN(last->e_name_len);
                        if (!last->e_value_inum)
                                total_size += EXT4_XATTR_SIZE(
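
The ext4_xattr_set_entry() hunk turns the entry walk into a validated one: the next pointer is derived from on-disk lengths and checked against the end of the buffer before the loop touches anything further, so a corrupted chain ends in -EFSCORRUPTED instead of a walk off the end. A generic userspace sketch of the pattern, with entry_len() as a hypothetical stand-in for EXT4_XATTR_NEXT()'s arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    struct rec {                    /* variable-length on-disk record */
            uint8_t name_len;       /* 0 terminates the chain */
    };

    static size_t entry_len(const struct rec *r)
    {
            return sizeof(*r) + r->name_len;        /* stand-in arithmetic */
    }

    static int walk(const uint8_t *base, const uint8_t *end)
    {
            const struct rec *r = (const struct rec *)base;

            while (r->name_len != 0) {
                    const struct rec *next = (const struct rec *)
                            ((const uint8_t *)r + entry_len(r));
                    if ((const uint8_t *)next >= end)
                            return -1;      /* corrupted chain: stop early */
                    /* ...only now is it safe to use r's payload... */
                    r = next;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[8] = { 3, 'a', 'b', 'c', 0 };
            printf("%d\n", walk(buf, buf + sizeof(buf)));
            return 0;
    }
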
index 065dc919a0ce15963b21265f4872b007bcfc3310..bfd589ea74c01ebf74e4866d920759143b01be16 100644 (file)
@@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb,
        brelse(bh);
 }
 
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+       if (opts->iocharset != fat_default_iocharset) {
+               /* Note: opts->iocharset can be NULL here */
+               kfree(opts->iocharset);
+               opts->iocharset = fat_default_iocharset;
+       }
+}
+
 static void delayed_free(struct rcu_head *p)
 {
        struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
        unload_nls(sbi->nls_disk);
        unload_nls(sbi->nls_io);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        kfree(sbi);
 }
 
@@ -1132,7 +1140,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
        opts->fs_fmask = opts->fs_dmask = current_umask();
        opts->allow_utime = -1;
        opts->codepage = fat_default_codepage;
-       opts->iocharset = fat_default_iocharset;
+       fat_reset_iocharset(opts);
        if (is_vfat) {
                opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
                opts->rodir = 0;
@@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 
                /* vfat specific */
                case Opt_charset:
-                       if (opts->iocharset != fat_default_iocharset)
-                               kfree(opts->iocharset);
+                       fat_reset_iocharset(opts);
                        iocharset = match_strdup(&args[0]);
                        if (!iocharset)
                                return -ENOMEM;
@@ -1881,8 +1888,7 @@ out_fail:
                iput(fat_inode);
        unload_nls(sbi->nls_io);
        unload_nls(sbi->nls_disk);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        sb->s_fs_info = NULL;
        kfree(sbi);
        return error;
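
The fat change is a classic own-the-free helper: every site that used to compare against fat_default_iocharset and kfree() now goes through fat_reset_iocharset(), which both frees and resets the pointer, so a later reset or an error path cannot double-free. A sketch of the idiom with generic names:

    #include <stdlib.h>
    #include <string.h>

    static const char default_charset[] = "utf8";

    struct opts { char *iocharset; };

    /* Free a possibly-allocated option and fall back to the static
     * default; calling this twice in a row is harmless by design. */
    static void reset_iocharset(struct opts *o)
    {
            if (o->iocharset != default_charset) {
                    free(o->iocharset);     /* free(NULL) is a no-op too */
                    o->iocharset = (char *)default_charset;
            }
    }

    int main(void)
    {
            struct opts o = { .iocharset = strdup("iso8859-1") };
            reset_iocharset(&o);
            reset_iocharset(&o);    /* second call: no double free */
            return 0;
    }
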
index 7ec0b3e5f05d19a54899e517d38c63d79abbbe00..d6eccd04d7038a9937e89630d4e1a00dbed98ef6 100644 (file)
@@ -51,6 +51,7 @@ static void file_free_rcu(struct rcu_head *head)
 
 static inline void file_free(struct file *f)
 {
+       security_file_free(f);
        percpu_counter_dec(&nr_files);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
 }
@@ -100,9 +101,8 @@ int proc_nr_files(struct ctl_table *table, int write,
  * done, you will get an imbalance in the mount's writer count
  * and a warning at __fput() time.
  */
-struct file *get_empty_filp(void)
+struct file *alloc_empty_file(int flags, const struct cred *cred)
 {
-       const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;
        int error;
@@ -123,11 +123,10 @@ struct file *get_empty_filp(void)
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);
 
-       percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
-               file_free(f);
+               file_free_rcu(&f->f_u.fu_rcuhead);
                return ERR_PTR(error);
        }
 
@@ -136,7 +135,10 @@ struct file *get_empty_filp(void)
        spin_lock_init(&f->f_lock);
        mutex_init(&f->f_pos_lock);
        eventpoll_init_file(f);
+       f->f_flags = flags;
+       f->f_mode = OPEN_FMODE(flags);
        /* f->f_version: 0 */
+       percpu_counter_inc(&nr_files);
        return f;
 
 over:
@@ -152,15 +154,15 @@ over:
  * alloc_file - allocate and initialize a 'struct file'
  *
  * @path: the (dentry, vfsmount) pair for the new file
- * @mode: the mode with which the new file will be opened
+ * @flags: O_... flags with which the new file will be opened
  * @fop: the 'struct file_operations' for the new file
  */
-struct file *alloc_file(const struct path *path, fmode_t mode,
+static struct file *alloc_file(const struct path *path, int flags,
                const struct file_operations *fop)
 {
        struct file *file;
 
-       file = get_empty_filp();
+       file = alloc_empty_file(flags, current_cred());
        if (IS_ERR(file))
                return file;
 
@@ -168,19 +170,56 @@ struct file *alloc_file(const struct path *path, fmode_t mode,
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
-       if ((mode & FMODE_READ) &&
+       if ((file->f_mode & FMODE_READ) &&
             likely(fop->read || fop->read_iter))
-               mode |= FMODE_CAN_READ;
-       if ((mode & FMODE_WRITE) &&
+               file->f_mode |= FMODE_CAN_READ;
+       if ((file->f_mode & FMODE_WRITE) &&
             likely(fop->write || fop->write_iter))
-               mode |= FMODE_CAN_WRITE;
-       file->f_mode = mode;
+               file->f_mode |= FMODE_CAN_WRITE;
+       file->f_mode |= FMODE_OPENED;
        file->f_op = fop;
-       if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+       if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
 }
-EXPORT_SYMBOL(alloc_file);
+
+struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
+                               const char *name, int flags,
+                               const struct file_operations *fops)
+{
+       static const struct dentry_operations anon_ops = {
+               .d_dname = simple_dname
+       };
+       struct qstr this = QSTR_INIT(name, strlen(name));
+       struct path path;
+       struct file *file;
+
+       path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+       if (!path.dentry)
+               return ERR_PTR(-ENOMEM);
+       if (!mnt->mnt_sb->s_d_op)
+               d_set_d_op(path.dentry, &anon_ops);
+       path.mnt = mntget(mnt);
+       d_instantiate(path.dentry, inode);
+       file = alloc_file(&path, flags, fops);
+       if (IS_ERR(file)) {
+               ihold(inode);
+               path_put(&path);
+       }
+       return file;
+}
+EXPORT_SYMBOL(alloc_file_pseudo);
+
+struct file *alloc_file_clone(struct file *base, int flags,
+                               const struct file_operations *fops)
+{
+       struct file *f = alloc_file(&base->f_path, flags, fops);
+       if (!IS_ERR(f)) {
+               path_get(&f->f_path);
+               f->f_mapping = base->f_mapping;
+       }
+       return f;
+}
 
 /* the real guts of fput() - releasing the last reference to file
  */
@@ -190,6 +229,9 @@ static void __fput(struct file *file)
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;
 
+       if (unlikely(!(file->f_mode & FMODE_OPENED)))
+               goto out;
+
        might_sleep();
 
        fsnotify_close(file);
@@ -207,7 +249,6 @@ static void __fput(struct file *file)
        }
        if (file->f_op->release)
                file->f_op->release(inode, file);
-       security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
@@ -220,12 +261,10 @@ static void __fput(struct file *file)
                put_write_access(inode);
                __mnt_drop_write(mnt);
        }
-       file->f_path.dentry = NULL;
-       file->f_path.mnt = NULL;
-       file->f_inode = NULL;
-       file_free(file);
        dput(dentry);
        mntput(mnt);
+out:
+       file_free(file);
 }
 
 static LLIST_HEAD(delayed_fput_list);
@@ -300,14 +339,6 @@ void __fput_sync(struct file *file)
 
 EXPORT_SYMBOL(fput);
 
-void put_filp(struct file *file)
-{
-       if (atomic_long_dec_and_test(&file->f_count)) {
-               security_file_free(file);
-               file_free(file);
-       }
-}
-
 void __init files_init(void)
 {
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
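
alloc_file_pseudo() above establishes a convention worth calling out: on success the new file owns the inode reference the caller passed in, while the failure path takes an extra ihold() before path_put() so the caller's reference survives. The caller can therefore iput() unconditionally on error and do nothing on success. A userspace sketch of that ownership rule with a toy refcount; all names here are stand-ins, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static void get(struct obj *o) { o->refs++; }
    static void put(struct obj *o) { if (--o->refs == 0) free(o); }

    /* On success the returned wrapper consumes the caller's reference;
     * on failure one extra ref is taken before internal teardown, so
     * the caller's reference is untouched either way. */
    static struct obj **wrap(struct obj *o, int fail)
    {
            struct obj **w = malloc(sizeof(*w));
            *w = o;                 /* wrapper now holds the caller's ref */
            if (fail) {
                    get(o);         /* mirror of ihold() */
                    put(*w);        /* mirror of path_put() dropping it */
                    free(w);
                    return NULL;
            }
            return w;
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            o->refs = 1;
            if (!wrap(o, 1))
                    put(o);         /* error: caller still owns its ref */
            printf("freed cleanly\n");
            return 0;
    }
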
index c184c5a356ff2b9f6d85534c67d01aeb75fda808..cdcb376ef8df4d3d48ee26d6d7bef2a1faa06377 100644 (file)
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
        struct fscache_cache_tag *tag;
 
+       ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
        BUG_ON(!cache->ops);
        BUG_ON(!ifsdef);
 
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
        if (!cache->kobj)
                goto error;
 
-       ifsdef->cookie = &fscache_fsdef_index;
        ifsdef->cache = cache;
        cache->fsdef = ifsdef;
 
index 97137d7ec5ee8bfe21796abde94144d726785d29..83bfe04456b6a99a196c485830b2ba2bc7419dec 100644 (file)
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                goto error;
        }
 
+       ASSERTCMP(object->cookie, ==, cookie);
        fscache_stat(&fscache_n_object_alloc);
 
        object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
 
+       ASSERTCMP(object->cookie, ==, cookie);
+
        spin_lock(&cookie->lock);
 
        /* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
                spin_unlock(&cache->object_list_lock);
        }
 
-       /* attach to the cookie */
-       object->cookie = cookie;
-       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+       /* Attach to the cookie.  The object already has a ref on it. */
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);
 
        fscache_objlist_add(object);
index 20e0d0a4dc8cba917ef354e93f1aafe8bfee46a8..9edc920f651f3929f9ad4f27e061df728c424794 100644 (file)
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
+       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
        object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
        RB_CLEAR_NODE(&object->objlist_link);
index e30c5975ea585e73dd70dcba892c295e3d78e68a..8d265790374cdac651ff3c6c9920d8c9467b7441 100644 (file)
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
-       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+       ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+                   op->state, ==,  FSCACHE_OP_ST_CANCELLED);
 
        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
        struct fscache_cache *cache;
 
        _enter("{OBJ%x OP%x,%d}",
-              op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+              op->object ? op->object->debug_id : 0,
+              op->debug_id, atomic_read(&op->usage));
 
        ASSERTCMP(atomic_read(&op->usage), >, 0);
 
index 56231b31f806b10a15d86006b0686312b5be2cb2..d80aab0d59822e416af65bb0e014902615adf0ac 100644 (file)
@@ -399,7 +399,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
  */
 static int fuse_create_open(struct inode *dir, struct dentry *entry,
                            struct file *file, unsigned flags,
-                           umode_t mode, int *opened)
+                           umode_t mode)
 {
        int err;
        struct inode *inode;
@@ -469,7 +469,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
        d_instantiate(entry, inode);
        fuse_change_entry_timeout(entry, &outentry);
        fuse_invalidate_attr(dir);
-       err = finish_open(file, entry, generic_file_open, opened);
+       err = finish_open(file, entry, generic_file_open);
        if (err) {
                fuse_sync_release(ff, flags);
        } else {
@@ -489,7 +489,7 @@ out_err:
 static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
                            struct file *file, unsigned flags,
-                           umode_t mode, int *opened)
+                           umode_t mode)
 {
        int err;
        struct fuse_conn *fc = get_fuse_conn(dir);
@@ -508,12 +508,12 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
                goto no_open;
 
        /* Only creates */
-       *opened |= FILE_CREATED;
+       file->f_mode |= FMODE_CREATED;
 
        if (fc->no_create)
                goto mknod;
 
-       err = fuse_create_open(dir, entry, file, flags, mode, opened);
+       err = fuse_create_open(dir, entry, file, flags, mode);
        if (err == -ENOSYS) {
                fc->no_create = 1;
                goto mknod;
@@ -539,6 +539,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
 {
        struct fuse_entry_out outarg;
        struct inode *inode;
+       struct dentry *d;
        int err;
        struct fuse_forget_link *forget;
 
@@ -570,11 +571,17 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
        }
        kfree(forget);
 
-       err = d_instantiate_no_diralias(entry, inode);
-       if (err)
-               return err;
+       d_drop(entry);
+       d = d_splice_alias(inode, entry);
+       if (IS_ERR(d))
+               return PTR_ERR(d);
 
-       fuse_change_entry_timeout(entry, &outarg);
+       if (d) {
+               fuse_change_entry_timeout(d, &outarg);
+               dput(d);
+       } else {
+               fuse_change_entry_timeout(entry, &outarg);
+       }
        fuse_invalidate_attr(dir);
        return 0;
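
create_new_entry() now follows the d_splice_alias() contract, which has a three-way result: an ERR_PTR on failure, a new dentry that supersedes the one passed in (use it, then dput() it), or NULL meaning the original dentry was used. A userspace sketch of consuming that convention, with toy ERR_PTR helpers modeled on the kernel's rather than the real definitions:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct dentry { const char *name; };

    /* alias stands in for d_splice_alias()'s result: an error pointer,
     * a preferred replacement, or NULL meaning "keep what you passed". */
    static int use_entry(struct dentry *entry, struct dentry *alias)
    {
            if (IS_ERR(alias))
                    return (int)PTR_ERR(alias);
            if (alias)
                    printf("using alias %s\n", alias->name); /* then dput() */
            else
                    printf("using original %s\n", entry->name);
            return 0;
    }

    int main(void)
    {
            struct dentry e = { "entry" }, a = { "alias" };
            use_entry(&e, &a);
            use_entry(&e, NULL);
            use_entry(&e, ERR_PTR(-17));
            return 0;
    }
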
 
index feda55f6705026168cdf7c479d71d527245b44a2..648f0ca1ad57e7893631d8c51f771aa47194fc7c 100644 (file)
@@ -580,7 +580,7 @@ static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
 static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                             struct file *file,
                             umode_t mode, dev_t dev, const char *symname,
-                            unsigned int size, int excl, int *opened)
+                            unsigned int size, int excl)
 {
        const struct qstr *name = &dentry->d_name;
        struct posix_acl *default_acl, *acl;
@@ -626,7 +626,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                error = 0;
                if (file) {
                        if (S_ISREG(inode->i_mode))
-                               error = finish_open(file, dentry, gfs2_open_common, opened);
+                               error = finish_open(file, dentry, gfs2_open_common);
                        else
                                error = finish_no_open(file, NULL);
                }
@@ -767,8 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
        if (file) {
-               *opened |= FILE_CREATED;
-               error = finish_open(file, dentry, gfs2_open_common, opened);
+               file->f_mode |= FMODE_CREATED;
+               error = finish_open(file, dentry, gfs2_open_common);
        }
        gfs2_glock_dq_uninit(ghs);
        gfs2_glock_dq_uninit(ghs + 1);
@@ -822,7 +822,7 @@ fail:
 static int gfs2_create(struct inode *dir, struct dentry *dentry,
                       umode_t mode, bool excl)
 {
-       return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
 }
 
 /**
@@ -830,14 +830,13 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
  * @dir: The directory inode
  * @dentry: The dentry of the new inode
  * @file: File to be opened
- * @opened: atomic_open flags
  *
  *
  * Returns: errno
  */
 
 static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
-                                   struct file *file, int *opened)
+                                   struct file *file)
 {
        struct inode *inode;
        struct dentry *d;
@@ -866,7 +865,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
                return d;
        }
        if (file && S_ISREG(inode->i_mode))
-               error = finish_open(file, dentry, gfs2_open_common, opened);
+               error = finish_open(file, dentry, gfs2_open_common);
 
        gfs2_glock_dq_uninit(&gh);
        if (error) {
@@ -879,7 +878,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
 static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
                                  unsigned flags)
 {
-       return __gfs2_lookup(dir, dentry, NULL, NULL);
+       return __gfs2_lookup(dir, dentry, NULL);
 }
 
 /**
@@ -1189,7 +1188,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
        if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
                return -ENAMETOOLONG;
 
-       return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
 }
 
 /**
@@ -1204,7 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
 static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
-       return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
 }
 
 /**
@@ -1219,7 +1218,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                      dev_t dev)
 {
-       return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
+       return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
 }
 
 /**
@@ -1229,14 +1228,13 @@ static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
  * @file: The proposed new struct file
  * @flags: open flags
  * @mode: File mode
- * @opened: Flag to say whether the file has been opened or not
  *
  * Returns: error code or 0 for success
  */
 
 static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
                            struct file *file, unsigned flags,
-                           umode_t mode, int *opened)
+                           umode_t mode)
 {
        struct dentry *d;
        bool excl = !!(flags & O_EXCL);
@@ -1244,13 +1242,13 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        if (!d_in_lookup(dentry))
                goto skip_lookup;
 
-       d = __gfs2_lookup(dir, dentry, file, opened);
+       d = __gfs2_lookup(dir, dentry, file);
        if (IS_ERR(d))
                return PTR_ERR(d);
        if (d != NULL)
                dentry = d;
        if (d_really_is_positive(dentry)) {
-               if (!(*opened & FILE_OPENED))
+               if (!(file->f_mode & FMODE_OPENED))
                        return finish_no_open(file, d);
                dput(d);
                return 0;
@@ -1262,7 +1260,7 @@ skip_lookup:
        if (!(flags & O_CREAT))
                return -ENOENT;
 
-       return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
+       return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
 }
 
 /*
index 2a16111d312fcaded7e265d294031035b63a9a3a..a2dfa1b2a89c7982deb26ea0fcd8eae46ac0dbbe 100644 (file)
@@ -541,7 +541,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
        HFS_I(inode)->rsrc_inode = dir;
        HFS_I(dir)->rsrc_inode = inode;
        igrab(dir);
-       hlist_add_fake(&inode->i_hash);
+       inode_fake_hash(inode);
        mark_inode_dirty(inode);
        dont_mount(dentry);
 out:
index d508c7844681fb4427ed8f66e2886cd9f39acbad..346a146c7617d10ab34ae8eb1bb5d6641e734d88 100644 (file)
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
        bool truncate_op = (lend == LLONG_MAX);
 
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, current->mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pagevec_init(&pvec);
        next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
         * as input to create an allocation policy.
         */
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;
 
@@ -1308,10 +1310,6 @@ static int get_hstate_idx(int page_size_log)
        return h - hstates;
 }
 
-static const struct dentry_operations anon_ops = {
-       .d_dname = simple_dname
-};
-
 /*
 * Note that size should be aligned to the proper hugepage size on the caller's
 * side; otherwise hugetlb_reserve_pages reserves one fewer hugepage than intended.
@@ -1320,19 +1318,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
 {
-       struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
-       struct path path;
-       struct super_block *sb;
-       struct qstr quick_string;
+       struct vfsmount *mnt;
        int hstate_idx;
+       struct file *file;
 
        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);
 
        *user = NULL;
-       if (!hugetlbfs_vfsmount[hstate_idx])
+       mnt = hugetlbfs_vfsmount[hstate_idx];
+       if (!mnt)
                return ERR_PTR(-ENOENT);
 
        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
@@ -1348,45 +1345,28 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
                }
        }
 
-       sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
-       quick_string.name = name;
-       quick_string.len = strlen(quick_string.name);
-       quick_string.hash = 0;
-       path.dentry = d_alloc_pseudo(sb, &quick_string);
-       if (!path.dentry)
-               goto out_shm_unlock;
-
-       d_set_d_op(path.dentry, &anon_ops);
-       path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
-       inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
+       inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
-               goto out_dentry;
+               goto out;
        if (creat_flags == HUGETLB_SHMFS_INODE)
                inode->i_flags |= S_PRIVATE;
 
-       file = ERR_PTR(-ENOMEM);
-       if (hugetlb_reserve_pages(inode, 0,
-                       size >> huge_page_shift(hstate_inode(inode)), NULL,
-                       acctflag))
-               goto out_inode;
-
-       d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);
 
-       file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
-                       &hugetlbfs_file_operations);
-       if (IS_ERR(file))
-               goto out_dentry; /* inode is already attached */
-
-       return file;
+       if (hugetlb_reserve_pages(inode, 0,
+                       size >> huge_page_shift(hstate_inode(inode)), NULL,
+                       acctflag))
+               file = ERR_PTR(-ENOMEM);
+       else
+               file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+                                       &hugetlbfs_file_operations);
+       if (!IS_ERR(file))
+               return file;
 
-out_inode:
        iput(inode);
-out_dentry:
-       path_put(&path);
-out_shm_unlock:
+out:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
index 2c300e98179607ea0062a2c1dbcee17e9bc926c4..a06de44542325a2848a3882774df0959d1dfa17b 100644 (file)
@@ -804,6 +804,10 @@ repeat:
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
+               if (unlikely(inode->i_state & I_CREATING)) {
+                       spin_unlock(&inode->i_lock);
+                       return ERR_PTR(-ESTALE);
+               }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
@@ -831,6 +835,10 @@ repeat:
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
+               if (unlikely(inode->i_state & I_CREATING)) {
+                       spin_unlock(&inode->i_lock);
+                       return ERR_PTR(-ESTALE);
+               }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
@@ -961,13 +969,26 @@ void unlock_new_inode(struct inode *inode)
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
-       inode->i_state &= ~I_NEW;
+       inode->i_state &= ~I_NEW & ~I_CREATING;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(unlock_new_inode);
 
+void discard_new_inode(struct inode *inode)
+{
+       lockdep_annotate_inode_mutex_key(inode);
+       spin_lock(&inode->i_lock);
+       WARN_ON(!(inode->i_state & I_NEW));
+       inode->i_state &= ~I_NEW;
+       smp_mb();
+       wake_up_bit(&inode->i_state, __I_NEW);
+       spin_unlock(&inode->i_lock);
+       iput(inode);
+}
+EXPORT_SYMBOL(discard_new_inode);
+
 /**
  * lock_two_nondirectories - take two i_mutexes on non-directory objects
  *
@@ -1029,6 +1050,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
 {
        struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
        struct inode *old;
+       bool creating = inode->i_state & I_CREATING;
 
 again:
        spin_lock(&inode_hash_lock);
@@ -1039,6 +1061,8 @@ again:
                 * Use the old inode instead of the preallocated one.
                 */
                spin_unlock(&inode_hash_lock);
+               if (IS_ERR(old))
+                       return NULL;
                wait_on_inode(old);
                if (unlikely(inode_unhashed(old))) {
                        iput(old);
@@ -1060,6 +1084,8 @@ again:
        inode->i_state |= I_NEW;
        hlist_add_head(&inode->i_hash, head);
        spin_unlock(&inode->i_lock);
+       if (!creating)
+               inode_sb_list_add(inode);
 unlock:
        spin_unlock(&inode_hash_lock);
 
@@ -1094,12 +1120,13 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
        struct inode *inode = ilookup5(sb, hashval, test, data);
 
        if (!inode) {
-               struct inode *new = new_inode(sb);
+               struct inode *new = alloc_inode(sb);
 
                if (new) {
+                       new->i_state = 0;
                        inode = inode_insert5(new, hashval, test, set, data);
                        if (unlikely(inode != new))
-                               iput(new);
+                               destroy_inode(new);
                }
        }
        return inode;
@@ -1128,6 +1155,8 @@ again:
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
+               if (IS_ERR(inode))
+                       return NULL;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
@@ -1165,6 +1194,8 @@ again:
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
+               if (IS_ERR(old))
+                       return NULL;
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
@@ -1282,7 +1313,7 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);
 
-       return inode;
+       return IS_ERR(inode) ? NULL : inode;
 }
 EXPORT_SYMBOL(ilookup5_nowait);
 
@@ -1338,6 +1369,8 @@ again:
        spin_unlock(&inode_hash_lock);
 
        if (inode) {
+               if (IS_ERR(inode))
+                       return NULL;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
@@ -1421,12 +1454,17 @@ int insert_inode_locked(struct inode *inode)
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
-                       inode->i_state |= I_NEW;
+                       inode->i_state |= I_NEW | I_CREATING;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
+               if (unlikely(old->i_state & I_CREATING)) {
+                       spin_unlock(&old->i_lock);
+                       spin_unlock(&inode_hash_lock);
+                       return -EBUSY;
+               }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
@@ -1443,7 +1481,10 @@ EXPORT_SYMBOL(insert_inode_locked);
 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
 {
-       struct inode *old = inode_insert5(inode, hashval, test, NULL, data);
+       struct inode *old;
+
+       inode->i_state |= I_CREATING;
+       old = inode_insert5(inode, hashval, test, NULL, data);
 
        if (old != inode) {
                iput(old);
@@ -1999,8 +2040,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
+
+               /* Directories are special, and always inherit S_ISGID */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
+               else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+                        !in_group_p(inode->i_gid) &&
+                        !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+                       mode &= ~S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
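
The inode_init_owner() hunk closes a privilege hole: in a setgid directory, a newly created non-directory keeps S_ISGID only if it is not group-executable, or the creator is in the owning group, or holds CAP_FSETID relative to the inode; otherwise the bit is silently dropped. The mode logic in isolation, as a userspace sketch where two plain bools stand in for in_group_p() and capable_wrt_inode_uidgid():

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static mode_t init_mode(mode_t mode, bool dir_is_setgid,
                            bool in_group, bool has_fsetid)
    {
            if (!dir_is_setgid)
                    return mode;
            if (S_ISDIR(mode)) {
                    /* Directories always inherit S_ISGID. */
                    mode |= S_ISGID;
            } else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                       !in_group && !has_fsetid) {
                    /* A group-executable setgid file created by someone
                     * not entitled to give it away: strip the bit. */
                    mode &= ~S_ISGID;
            }
            return mode;
    }

    int main(void)
    {
            printf("%o\n", init_mode(S_IFREG | 02755, true, false, false));
            printf("%o\n", init_mode(S_IFREG | 02755, true, true, false));
            return 0;
    }
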
index 980d005b21b4111084ab7a56b17fda7e5b1320f5..52a346903748b6a104a0dcc8e86548a1d9963eb1 100644 (file)
@@ -93,7 +93,7 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
 /*
  * file_table.c
  */
-extern struct file *get_empty_filp(void);
+extern struct file *alloc_empty_file(int, const struct cred *);
 
 /*
  * super.c
@@ -125,9 +125,7 @@ int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
 int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
                int flag);
 
-extern int open_check_o_direct(struct file *f);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
+extern int vfs_open(const struct path *, struct file *);
 
 /*
  * inode.c
index 77397b5a96ef9c8f90a8e2b9c7afb50afdbae025..0d0bd88455867f9dacd20421e278a4121851c0cb 100644 (file)
@@ -1443,7 +1443,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops)
 {
        struct inode *inode = mapping->host;
-       loff_t pos = bno >> inode->i_blkbits;
+       loff_t pos = bno << inode->i_blkbits;
        unsigned blocksize = i_blocksize(inode);
 
        if (filemap_write_and_wait(mapping))
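
The iomap_bmap() fix is a one-character shift-direction bug: bno is a block number and pos a byte offset, so converting block to byte must shift left by i_blkbits, not right. A worked example for 4 KiB blocks (i_blkbits = 12):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned blkbits = 12;          /* 4096-byte blocks */
            uint64_t bno = 3;               /* third block */

            printf("wrong: %llu\n", (unsigned long long)(bno >> blkbits));
            printf("right: %llu\n", (unsigned long long)(bno << blkbits));
            return 0;
    }
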
index 51dd68e67b0f3abfcd115196724079e226467d09..c0b66a7a795b1cd22de3061930e454ff24394925 100644 (file)
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                if (jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata) {
                        jbd_lock_bh_state(bh);
+                       if (jh->b_transaction == transaction &&
+                           jh->b_jlist != BJ_Metadata)
+                               pr_err("JBD2: assertion failure: h_type=%u "
+                                      "h_line_no=%u block_no=%llu jlist=%u\n",
+                                      handle->h_type, handle->h_line_no,
+                                      (unsigned long long) bh->b_blocknr,
+                                      jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        jbd_unlock_bh_state(bh);
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                 * of the transaction. This needs to be done
                 * once a transaction -bzzz
                 */
-               jh->b_modified = 1;
                if (handle->h_buffer_credits <= 0) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
+               jh->b_modified = 1;
                handle->h_buffer_credits--;
        }
 
index 395c4c0d0f0667bf0f8e604d2e9b4b3eb1600698..1682a87c00b25992975bffacf2df5d7609931b1f 100644 (file)
@@ -115,6 +115,13 @@ struct dinode {
                                        dxd_t _dxd;     /* 16: */
                                        union {
                                                __le32 _rdev;   /* 4: */
+                                               /*
+                                                * The fast symlink area
+                                                * is expected to overflow
+                                                * into _inlineea when
+                                                * needed (which will clear
+                                                * INLINEEA).
+                                                */
                                                u8 _fastsymlink[128];
                                        } _u;
                                        u8 _inlineea[128];
index f36ef68905a74d830e75e7566c1145c8f55fc0cf..93e8c590ff5c060eb6026bf96d15d3db07a25a9f 100644 (file)
@@ -491,13 +491,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
        /* release the page */
        release_metapage(mp);
 
-       /*
-        * __mark_inode_dirty expects inodes to be hashed.  Since we don't
-        * want special inodes in the fileset inode space, we make them
-        * appear hashed, but do not put on any lists.  hlist_del()
-        * will work fine and require no locking.
-        */
-       hlist_add_fake(&ip->i_hash);
+       inode_fake_hash(ip);
 
        return (ip);
 }
index 1f26d1910409afb8d3de7ba4455b7b195ed5c4bd..9940a1e04cbfb766bb8609cb85890b0823fa1af5 100644 (file)
@@ -87,6 +87,7 @@ struct jfs_inode_info {
                struct {
                        unchar _unused[16];     /* 16: */
                        dxd_t _dxd;             /* 16: */
+                       /* _inline may overflow into _inline_ea when needed */
                        unchar _inline[128];    /* 128: inline symlink */
                        /* _inline_ea may overlay the last part of
                         * file._xtroot if maxentry = XTROOTINITSLOT
index 5e9b7bb3aabf9d5827d367d35ee1bd4740af90e3..4572b7cf183d855b0b3312c8b5fb098510f5f86c 100644 (file)
@@ -61,8 +61,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
        inode = new_inode(sb);
        if (!inode) {
                jfs_warn("ialloc: new_inode returned NULL!");
-               rc = -ENOMEM;
-               goto fail;
+               return ERR_PTR(-ENOMEM);
        }
 
        jfs_inode = JFS_IP(inode);
@@ -70,8 +69,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
        rc = diAlloc(parent, S_ISDIR(mode), inode);
        if (rc) {
                jfs_warn("ialloc: diAlloc returned %d!", rc);
-               if (rc == -EIO)
-                       make_bad_inode(inode);
                goto fail_put;
        }
 
@@ -141,9 +138,10 @@ fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        clear_nlink(inode);
-       unlock_new_inode(inode);
+       discard_new_inode(inode);
+       return ERR_PTR(rc);
+
 fail_put:
        iput(inode);
-fail:
        return ERR_PTR(rc);
 }
index 56c3fcbfe80ed0b69156bcab2f981b4d322b0aba..14528c0ffe635a30cb1a3efb403e4f2a429d5034 100644 (file)
@@ -175,8 +175,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
        if (rc) {
                free_ea_wmap(ip);
                clear_nlink(ip);
-               unlock_new_inode(ip);
-               iput(ip);
+               discard_new_inode(ip);
        } else {
                d_instantiate_new(dentry, ip);
        }
@@ -309,8 +308,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
        if (rc) {
                free_ea_wmap(ip);
                clear_nlink(ip);
-               unlock_new_inode(ip);
-               iput(ip);
+               discard_new_inode(ip);
        } else {
                d_instantiate_new(dentry, ip);
        }
@@ -1054,8 +1052,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
        if (rc) {
                free_ea_wmap(ip);
                clear_nlink(ip);
-               unlock_new_inode(ip);
-               iput(ip);
+               discard_new_inode(ip);
        } else {
                d_instantiate_new(dentry, ip);
        }
@@ -1441,8 +1438,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
        if (rc) {
                free_ea_wmap(ip);
                clear_nlink(ip);
-               unlock_new_inode(ip);
-               iput(ip);
+               discard_new_inode(ip);
        } else {
                d_instantiate_new(dentry, ip);
        }
index 1b9264fd54b68606ac73b749ce1f3af3ea860381..09da5cf14e2774022b1879eba30997a8035fc4da 100644 (file)
@@ -581,7 +581,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        inode->i_ino = 0;
        inode->i_size = i_size_read(sb->s_bdev->bd_inode);
        inode->i_mapping->a_ops = &jfs_metapage_aops;
-       hlist_add_fake(&inode->i_hash);
+       inode_fake_hash(inode);
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 
        sbi->direct_inode = inode;
@@ -967,8 +967,7 @@ static int __init init_jfs_fs(void)
        jfs_inode_cachep =
            kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
                        0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
-                       offsetof(struct jfs_inode_info, i_inline),
-                       sizeof_field(struct jfs_inode_info, i_inline),
+                       offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
                        init_once);
        if (jfs_inode_cachep == NULL)
                return -ENOMEM;
index c60f3d32ee911192c0cd8dae3b7cb11c0f416411..a6797986b625a34d19e097050c58f582c177c30c 100644 (file)
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
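
Reordering these statements is more than cosmetic: ea_buf->max_size, the block-rounded capacity the rest of ea_get() works against, is now computed first and passed to kmalloc(), so the allocation can no longer be smaller than the size the code later assumes. The rounding is the usual power-of-two round-up; a standalone userspace sketch with made-up sizes:

    #include <assert.h>
    #include <stddef.h>

    /* round size up to the next multiple of bs; bs must be a power
     * of two, as a filesystem block size is */
    static size_t round_up_pow2(size_t size, size_t bs)
    {
        return (size + bs - 1) & ~(bs - 1);
    }

    int main(void)
    {
        assert(round_up_pow2(1, 4096) == 4096);
        assert(round_up_pow2(4096, 4096) == 4096);
        assert(round_up_pow2(4097, 4096) == 8192);
        return 0;
    }
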
index 734cef54fdf8b0cc3c4136dc3b11f6e07c5952e2..278e494bcbd273e4b05268e497ced5cefbb435b3 100644 (file)
@@ -2028,6 +2028,8 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 {
        int err;
 
+       if (IS_ERR(name))
+               return PTR_ERR(name);
        while (*name=='/')
                name++;
        if (!*name)
@@ -2125,12 +2127,15 @@ OK:
        }
 }
 
+/* must be paired with terminate_walk() */
 static const char *path_init(struct nameidata *nd, unsigned flags)
 {
        const char *s = nd->name->name;
 
        if (!*s)
                flags &= ~LOOKUP_RCU;
+       if (flags & LOOKUP_RCU)
+               rcu_read_lock();
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -2143,7 +2148,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
                nd->path = nd->root;
                nd->inode = inode;
                if (flags & LOOKUP_RCU) {
-                       rcu_read_lock();
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                        nd->root_seq = nd->seq;
                        nd->m_seq = read_seqbegin(&mount_lock);
@@ -2159,21 +2163,15 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 
        nd->m_seq = read_seqbegin(&mount_lock);
        if (*s == '/') {
-               if (flags & LOOKUP_RCU)
-                       rcu_read_lock();
                set_root(nd);
                if (likely(!nd_jump_root(nd)))
                        return s;
-               nd->root.mnt = NULL;
-               rcu_read_unlock();
                return ERR_PTR(-ECHILD);
        } else if (nd->dfd == AT_FDCWD) {
                if (flags & LOOKUP_RCU) {
                        struct fs_struct *fs = current->fs;
                        unsigned seq;
 
-                       rcu_read_lock();
-
                        do {
                                seq = read_seqcount_begin(&fs->seq);
                                nd->path = fs->pwd;
@@ -2195,16 +2193,13 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 
                dentry = f.file->f_path.dentry;
 
-               if (*s) {
-                       if (!d_can_lookup(dentry)) {
-                               fdput(f);
-                               return ERR_PTR(-ENOTDIR);
-                       }
+               if (*s && unlikely(!d_can_lookup(dentry))) {
+                       fdput(f);
+                       return ERR_PTR(-ENOTDIR);
                }
 
                nd->path = f.file->f_path;
                if (flags & LOOKUP_RCU) {
-                       rcu_read_lock();
                        nd->inode = nd->path.dentry->d_inode;
                        nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
                } else {
@@ -2272,24 +2267,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
        const char *s = path_init(nd, flags);
        int err;
 
-       if (IS_ERR(s))
-               return PTR_ERR(s);
-
-       if (unlikely(flags & LOOKUP_DOWN)) {
+       if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
                err = handle_lookup_down(nd);
-               if (unlikely(err < 0)) {
-                       terminate_walk(nd);
-                       return err;
-               }
+               if (unlikely(err < 0))
+                       s = ERR_PTR(err);
        }
 
        while (!(err = link_path_walk(s, nd))
                && ((err = lookup_last(nd)) > 0)) {
                s = trailing_symlink(nd);
-               if (IS_ERR(s)) {
-                       err = PTR_ERR(s);
-                       break;
-               }
        }
        if (!err)
                err = complete_walk(nd);
@@ -2336,10 +2322,7 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
                                struct path *parent)
 {
        const char *s = path_init(nd, flags);
-       int err;
-       if (IS_ERR(s))
-               return PTR_ERR(s);
-       err = link_path_walk(s, nd);
+       int err = link_path_walk(s, nd);
        if (!err)
                err = complete_walk(nd);
        if (!err) {
@@ -2666,15 +2649,10 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
 {
        const char *s = path_init(nd, flags);
        int err;
-       if (IS_ERR(s))
-               return PTR_ERR(s);
+
        while (!(err = link_path_walk(s, nd)) &&
                (err = mountpoint_last(nd)) > 0) {
                s = trailing_symlink(nd);
-               if (IS_ERR(s)) {
-                       err = PTR_ERR(s);
-                       break;
-               }
        }
        if (!err) {
                *path = nd->path;
@@ -3027,17 +3005,16 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
  * Returns 0 if successful.  The file will have been created and attached to
  * @file by the filesystem calling finish_open().
  *
- * Returns 1 if the file was looked up only or didn't need creating.  The
- * caller will need to perform the open themselves.  @path will have been
- * updated to point to the new dentry.  This may be negative.
+ * If the file was looked up only or didn't need creating, FMODE_OPENED won't
+ * be set.  The caller will need to perform the open themselves.  @path will
+ * have been updated to point to the new dentry, which may be negative.
  *
  * Returns an error code otherwise.
  */
 static int atomic_open(struct nameidata *nd, struct dentry *dentry,
                        struct path *path, struct file *file,
                        const struct open_flags *op,
-                       int open_flag, umode_t mode,
-                       int *opened)
+                       int open_flag, umode_t mode)
 {
        struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
        struct inode *dir =  nd->path.dentry->d_inode;
@@ -3052,39 +3029,38 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
        file->f_path.dentry = DENTRY_NOT_SET;
        file->f_path.mnt = nd->path.mnt;
        error = dir->i_op->atomic_open(dir, dentry, file,
-                                      open_to_namei_flags(open_flag),
-                                      mode, opened);
+                                      open_to_namei_flags(open_flag), mode);
        d_lookup_done(dentry);
        if (!error) {
-               /*
-                * We didn't have the inode before the open, so check open
-                * permission here.
-                */
-               int acc_mode = op->acc_mode;
-               if (*opened & FILE_CREATED) {
-                       WARN_ON(!(open_flag & O_CREAT));
-                       fsnotify_create(dir, dentry);
-                       acc_mode = 0;
-               }
-               error = may_open(&file->f_path, acc_mode, open_flag);
-               if (WARN_ON(error > 0))
-                       error = -EINVAL;
-       } else if (error > 0) {
-               if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
+               if (file->f_mode & FMODE_OPENED) {
+                       /*
+                        * We didn't have the inode before the open, so check open
+                        * permission here.
+                        */
+                       int acc_mode = op->acc_mode;
+                       if (file->f_mode & FMODE_CREATED) {
+                               WARN_ON(!(open_flag & O_CREAT));
+                               fsnotify_create(dir, dentry);
+                               acc_mode = 0;
+                       }
+                       error = may_open(&file->f_path, acc_mode, open_flag);
+                       if (WARN_ON(error > 0))
+                               error = -EINVAL;
+               } else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
                        error = -EIO;
                } else {
                        if (file->f_path.dentry) {
                                dput(dentry);
                                dentry = file->f_path.dentry;
                        }
-                       if (*opened & FILE_CREATED)
+                       if (file->f_mode & FMODE_CREATED)
                                fsnotify_create(dir, dentry);
                        if (unlikely(d_is_negative(dentry))) {
                                error = -ENOENT;
                        } else {
                                path->dentry = dentry;
                                path->mnt = nd->path.mnt;
-                               return 1;
+                               return 0;
                        }
                }
        }
@@ -3095,25 +3071,22 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
 /*
  * Look up and maybe create and open the last component.
  *
- * Must be called with i_mutex held on parent.
- *
- * Returns 0 if the file was successfully atomically created (if necessary) and
- * opened.  In this case the file will be returned attached to @file.
+ * Must be called with parent locked (exclusive in O_CREAT case).
  *
- * Returns 1 if the file was not completely opened at this time, though lookups
- * and creations will have been performed and the dentry returned in @path will
- * be positive upon return if O_CREAT was specified.  If O_CREAT wasn't
- * specified then a negative dentry may be returned.
+ * Returns 0 on success, that is, if either
+ *  the file was successfully atomically created (if necessary) and opened, or
+ *  the file was not completely opened at this time, though lookups and
+ *  creations were performed.
+ * These cases are distinguished by the presence of FMODE_OPENED in
+ * file->f_mode.  In the latter case, the dentry returned in @path may be
+ * negative if O_CREAT hadn't been specified.
  *
- * An error code is returned otherwise.
- *
- * FILE_CREATE will be set in @*opened if the dentry was created and will be
- * cleared otherwise prior to returning.
+ * An error code is returned on failure.
  */
 static int lookup_open(struct nameidata *nd, struct path *path,
                        struct file *file,
                        const struct open_flags *op,
-                       bool got_write, int *opened)
+                       bool got_write)
 {
        struct dentry *dir = nd->path.dentry;
        struct inode *dir_inode = dir->d_inode;
@@ -3126,7 +3099,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
        if (unlikely(IS_DEADDIR(dir_inode)))
                return -ENOENT;
 
-       *opened &= ~FILE_CREATED;
+       file->f_mode &= ~FMODE_CREATED;
        dentry = d_lookup(dir, &nd->last);
        for (;;) {
                if (!dentry) {
@@ -3188,7 +3161,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
 
        if (dir_inode->i_op->atomic_open) {
                error = atomic_open(nd, dentry, path, file, op, open_flag,
-                                   mode, opened);
+                                   mode);
                if (unlikely(error == -ENOENT) && create_error)
                        error = create_error;
                return error;
@@ -3211,7 +3184,7 @@ no_open:
 
        /* Negative dentry, just create the file */
        if (!dentry->d_inode && (open_flag & O_CREAT)) {
-               *opened |= FILE_CREATED;
+               file->f_mode |= FMODE_CREATED;
                audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
                if (!dir_inode->i_op->create) {
                        error = -EACCES;
@@ -3230,7 +3203,7 @@ no_open:
 out_no_open:
        path->dentry = dentry;
        path->mnt = nd->path.mnt;
-       return 1;
+       return 0;
 
 out_dput:
        dput(dentry);
@@ -3241,8 +3214,7 @@ out_dput:
  * Handle the last step of open()
  */
 static int do_last(struct nameidata *nd,
-                  struct file *file, const struct open_flags *op,
-                  int *opened)
+                  struct file *file, const struct open_flags *op)
 {
        struct dentry *dir = nd->path.dentry;
        int open_flag = op->open_flag;
@@ -3308,17 +3280,17 @@ static int do_last(struct nameidata *nd,
                inode_lock(dir->d_inode);
        else
                inode_lock_shared(dir->d_inode);
-       error = lookup_open(nd, &path, file, op, got_write, opened);
+       error = lookup_open(nd, &path, file, op, got_write);
        if (open_flag & O_CREAT)
                inode_unlock(dir->d_inode);
        else
                inode_unlock_shared(dir->d_inode);
 
-       if (error <= 0) {
-               if (error)
-                       goto out;
+       if (error)
+               goto out;
 
-               if ((*opened & FILE_CREATED) ||
+       if (file->f_mode & FMODE_OPENED) {
+               if ((file->f_mode & FMODE_CREATED) ||
                    !S_ISREG(file_inode(file)->i_mode))
                        will_truncate = false;
 
@@ -3326,7 +3298,7 @@ static int do_last(struct nameidata *nd,
                goto opened;
        }
 
-       if (*opened & FILE_CREATED) {
+       if (file->f_mode & FMODE_CREATED) {
                /* Don't check for write permission, don't truncate */
                open_flag &= ~O_TRUNC;
                will_truncate = false;
@@ -3395,20 +3367,15 @@ finish_open_created:
        error = may_open(&nd->path, acc_mode, open_flag);
        if (error)
                goto out;
-       BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
-       error = vfs_open(&nd->path, file, current_cred());
+       BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
+       error = vfs_open(&nd->path, file);
        if (error)
                goto out;
-       *opened |= FILE_OPENED;
 opened:
-       error = open_check_o_direct(file);
-       if (!error)
-               error = ima_file_check(file, op->acc_mode, *opened);
+       error = ima_file_check(file, op->acc_mode);
        if (!error && will_truncate)
                error = handle_truncate(file);
 out:
-       if (unlikely(error) && (*opened & FILE_OPENED))
-               fput(file);
        if (unlikely(error > 0)) {
                WARN_ON(1);
                error = -EINVAL;
@@ -3458,7 +3425,7 @@ EXPORT_SYMBOL(vfs_tmpfile);
 
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
                const struct open_flags *op,
-               struct file *file, int *opened)
+               struct file *file)
 {
        struct dentry *child;
        struct path path;
@@ -3480,12 +3447,7 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
        if (error)
                goto out2;
        file->f_path.mnt = path.mnt;
-       error = finish_open(file, child, NULL, opened);
-       if (error)
-               goto out2;
-       error = open_check_o_direct(file);
-       if (error)
-               fput(file);
+       error = finish_open(file, child, NULL);
 out2:
        mnt_drop_write(path.mnt);
 out:
@@ -3499,7 +3461,7 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
        int error = path_lookupat(nd, flags, &path);
        if (!error) {
                audit_inode(nd->name, path.dentry, 0);
-               error = vfs_open(&path, file, current_cred());
+               error = vfs_open(&path, file);
                path_put(&path);
        }
        return error;
@@ -3508,59 +3470,40 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
 static struct file *path_openat(struct nameidata *nd,
                        const struct open_flags *op, unsigned flags)
 {
-       const char *s;
        struct file *file;
-       int opened = 0;
        int error;
 
-       file = get_empty_filp();
+       file = alloc_empty_file(op->open_flag, current_cred());
        if (IS_ERR(file))
                return file;
 
-       file->f_flags = op->open_flag;
-
        if (unlikely(file->f_flags & __O_TMPFILE)) {
-               error = do_tmpfile(nd, flags, op, file, &opened);
-               goto out2;
-       }
-
-       if (unlikely(file->f_flags & O_PATH)) {
+               error = do_tmpfile(nd, flags, op, file);
+       } else if (unlikely(file->f_flags & O_PATH)) {
                error = do_o_path(nd, flags, file);
-               if (!error)
-                       opened |= FILE_OPENED;
-               goto out2;
-       }
-
-       s = path_init(nd, flags);
-       if (IS_ERR(s)) {
-               put_filp(file);
-               return ERR_CAST(s);
-       }
-       while (!(error = link_path_walk(s, nd)) &&
-               (error = do_last(nd, file, op, &opened)) > 0) {
-               nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
-               s = trailing_symlink(nd);
-               if (IS_ERR(s)) {
-                       error = PTR_ERR(s);
-                       break;
+       } else {
+               const char *s = path_init(nd, flags);
+               while (!(error = link_path_walk(s, nd)) &&
+                       (error = do_last(nd, file, op)) > 0) {
+                       nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+                       s = trailing_symlink(nd);
                }
+               terminate_walk(nd);
        }
-       terminate_walk(nd);
-out2:
-       if (!(opened & FILE_OPENED)) {
-               BUG_ON(!error);
-               put_filp(file);
+       if (likely(!error)) {
+               if (likely(file->f_mode & FMODE_OPENED))
+                       return file;
+               WARN_ON(1);
+               error = -EINVAL;
        }
-       if (unlikely(error)) {
-               if (error == -EOPENSTALE) {
-                       if (flags & LOOKUP_RCU)
-                               error = -ECHILD;
-                       else
-                               error = -ESTALE;
-               }
-               file = ERR_PTR(error);
+       fput(file);
+       if (error == -EOPENSTALE) {
+               if (flags & LOOKUP_RCU)
+                       error = -ECHILD;
+               else
+                       error = -ESTALE;
        }
-       return file;
+       return ERR_PTR(error);
 }
 
 struct file *do_filp_open(int dfd, struct filename *pathname,
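
Taken together, the fs/namei.c hunks establish one pattern: path_init() takes the RCU read lock in a single place, failures travel as ERR_PTR() cookies through link_path_walk() and trailing_symlink() instead of being checked at every call site, and terminate_walk() is the one cleanup point paired with path_init(). A minimal sketch of the resulting caller shape (modeled on path_parentat() above, not a new API):

    static int example_lookupat(struct nameidata *nd, unsigned flags)
    {
        const char *s = path_init(nd, flags);  /* may be ERR_PTR() */
        int err = link_path_walk(s, nd);       /* yields PTR_ERR(s) if so */

        if (!err)
            err = complete_walk(nd);
        terminate_walk(nd);                    /* paired with path_init() */
        return err;
    }
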
index 8ddd14806799db5d701ffd1eee41b650dfba3313..bd2f4c68506afb79023c4423abebe392397c53ea 100644 (file)
@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
                return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
+       smp_mb();                       // see mntput_no_expire()
        if (likely(!read_seqretry(&mount_lock, seq)))
                return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
                return 1;
        }
+       lock_mount_hash();
+       if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+               mnt_add_count(mnt, -1);
+               unlock_mount_hash();
+               return 1;
+       }
+       unlock_mount_hash();
+       /* caller will mntput() */
        return -1;
 }
 
@@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
        rcu_read_lock();
-       mnt_add_count(mnt, -1);
-       if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+       if (likely(READ_ONCE(mnt->mnt_ns))) {
+               /*
+                * Since we don't do lock_mount_hash() here,
+                * ->mnt_ns can change under us.  However, if it's
+                * non-NULL, then there's a reference that won't
+                * be dropped until after an RCU delay done after
+                * turning ->mnt_ns NULL.  So if we observe it
+                * non-NULL under rcu_read_lock(), the reference
+                * we are dropping is not the final one.
+                */
+               mnt_add_count(mnt, -1);
                rcu_read_unlock();
                return;
        }
        lock_mount_hash();
+       /*
+        * make sure that if __legitimize_mnt() has not seen us grab
+        * mount_lock, we'll see their refcount increment here.
+        */
+       smp_mb();
+       mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
                rcu_read_unlock();
                unlock_mount_hash();
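
The two smp_mb() calls pair with each other: __legitimize_mnt() publishes its count increment before re-checking the seqcount, while mntput_no_expire() publishes that it holds mount_lock before re-reading the count, so at least one side is guaranteed to observe the other. A userspace analogue of that store/barrier/load pairing, using C11 fences in place of smp_mb() (a sketch, not kernel code; build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int count, locked;
    static int r_legitimize, r_mntput;

    static void *legitimize(void *arg)
    {
        (void)arg;
        atomic_fetch_add_explicit(&count, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);            /* smp_mb() */
        r_legitimize = atomic_load_explicit(&locked, memory_order_relaxed);
        return NULL;
    }

    static void *mntput(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&locked, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);            /* smp_mb() */
        r_mntput = atomic_load_explicit(&count, memory_order_relaxed);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, legitimize, NULL);
        pthread_create(&b, NULL, mntput, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* with both fences, r_legitimize == 0 && r_mntput == 0
         * is impossible: one side must see the other */
        printf("legitimize saw locked=%d, mntput saw count=%d\n",
               r_legitimize, r_mntput);
        return 0;
    }
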
index bbd0465535ebd9e433a812d60ab345161ef736b3..f033f3a69a3bcf7259192a9e062d7af295f90639 100644 (file)
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
-               if (res != ERR_PTR(-ENOENT))
+               if (res != ERR_PTR(-ENOENT)) {
+                       rcu_read_unlock();
                        return res;
+               }
        }
        rcu_read_unlock();
        return ERR_PTR(-ENOENT);
index 7a9c14426855309d2bb68e681b2db32321648d0c..d7f158c3efc8a18b78f9b592a1466dc7be28b7b6 100644 (file)
@@ -1434,12 +1434,11 @@ static int do_open(struct inode *inode, struct file *filp)
 
 static int nfs_finish_open(struct nfs_open_context *ctx,
                           struct dentry *dentry,
-                          struct file *file, unsigned open_flags,
-                          int *opened)
+                          struct file *file, unsigned open_flags)
 {
        int err;
 
-       err = finish_open(file, dentry, do_open, opened);
+       err = finish_open(file, dentry, do_open);
        if (err)
                goto out;
        if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
@@ -1452,7 +1451,7 @@ out:
 
 int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                    struct file *file, unsigned open_flags,
-                   umode_t mode, int *opened)
+                   umode_t mode)
 {
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        struct nfs_open_context *ctx;
@@ -1461,6 +1460,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        struct inode *inode;
        unsigned int lookup_flags = 0;
        bool switched = false;
+       int created = 0;
        int err;
 
        /* Expect a negative dentry */
@@ -1521,7 +1521,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                goto out;
 
        trace_nfs_atomic_open_enter(dir, ctx, open_flags);
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
+       if (created)
+               file->f_mode |= FMODE_CREATED;
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1546,7 +1548,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                goto out;
        }
 
-       err = nfs_finish_open(ctx, ctx->dentry, file, open_flags, opened);
+       err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
        trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
        put_nfs_open_context(ctx);
 out:
@@ -1641,6 +1643,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
        struct dentry *parent = dget_parent(dentry);
        struct inode *dir = d_inode(parent);
        struct inode *inode;
+       struct dentry *d;
        int error = -EACCES;
 
        d_drop(dentry);
@@ -1662,10 +1665,12 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
                        goto out_error;
        }
        inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
-       error = PTR_ERR(inode);
-       if (IS_ERR(inode))
+       d = d_splice_alias(inode, dentry);
+       if (IS_ERR(d)) {
+               error = PTR_ERR(d);
                goto out_error;
-       d_add(dentry, inode);
+       }
+       dput(d);
 out:
        dput(parent);
        return 0;
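
nfs_instantiate() now relies on the d_splice_alias() contract: the helper consumes the inode reference in every case, tolerates a NULL or ERR_PTR() inode, and may return an alias dentry, which a caller that only wanted instantiation simply dput()s (dput(NULL) is a no-op, so the success path needs no extra check). For comparison, the canonical ->lookup() shape built on the same helper (a sketch; example_iget() is a hypothetical stand-in for the filesystem's inode lookup):

    static struct dentry *example_lookup(struct inode *dir,
                                         struct dentry *dentry,
                                         unsigned int flags)
    {
        /* may return the inode, NULL, or ERR_PTR(); d_splice_alias()
         * copes with all three and returns what ->lookup() should */
        struct inode *inode = example_iget(dir, &dentry->d_name);

        return d_splice_alias(inode, dentry);
    }
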
index d4a07acad5989e1374f879f2cc46c284f9aa8c4f..8f003792ccde1c24c3bcd444a609b888a629340f 100644 (file)
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               ff_layout_read_record_layoutstats_done(task, hdr);
-               pnfs_read_resend_pnfs(hdr);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_read(hdr);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               pnfs_read_resend_pnfs(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }
 
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               ff_layout_reset_write(hdr, true);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_write(hdr, false);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_reset_write(hdr, true);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_write(hdr, false);
        pnfs_generic_rw_release(data);
 }
 
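Both the read and the write paths apply the same fix: the RPC ->done callback no longer re-drives I/O itself but merely records which resend is wanted in the NFS_IOHDR_RESEND_* bits, and the ->release callback, which runs once the RPC layer is finished with the request, performs it. The shape of the deferral (a sketch; should_resend_pnfs() is a hypothetical predicate):

    static int example_done(struct rpc_task *task,
                            struct nfs_pgio_header *hdr)
    {
        if (should_resend_pnfs(hdr))
            set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
        return task->tk_status;
    }

    static void example_release(struct nfs_pgio_header *hdr)
    {
        /* safe point to resend: the task is done with hdr */
        if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
            pnfs_read_resend_pnfs(hdr);
    }
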
index 137e18abb7e792660be11acf7b114213f71cd9a1..51beb6e38c9009e486344ac430c533a9f7e5bc3a 100644 (file)
@@ -258,7 +258,7 @@ extern const struct dentry_operations nfs4_dentry_operations;
 
 /* dir.c */
 int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
-                   unsigned, umode_t, int *);
+                   unsigned, umode_t);
 
 /* super.c */
 extern struct file_system_type nfs4_fs_type;
index ed45090e4df6471902f5968b908429fe28976280..b790976d39135de899c1b70a08f75059247e2236 100644 (file)
@@ -2951,7 +2951,7 @@ static int _nfs4_do_open(struct inode *dir,
                }
        }
        if (opened && opendata->file_created)
-               *opened |= FILE_CREATED;
+               *opened = 1;
 
        if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
                *ctx_th = opendata->f_attr.mdsthreshold;
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        struct nfs4_closedata *calldata = data;
        struct nfs4_state *state = calldata->state;
        struct inode *inode = calldata->inode;
+       struct pnfs_layout_hdr *lo;
        bool is_rdonly, is_wronly, is_rdwr;
        int call_close = 0;
 
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
+       lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               calldata->arg.lr_args = NULL;
+               calldata->res.lr_res = NULL;
+       }
+
        if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 {
        struct nfs4_delegreturndata *d_data;
+       struct pnfs_layout_hdr *lo;
 
        d_data = (struct nfs4_delegreturndata *)data;
 
        if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
                return;
 
+       lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               d_data->args.lr_args = NULL;
+               d_data->res.lr_res = NULL;
+       }
+
        nfs4_setup_sequence(d_data->res.server->nfs_client,
                        &d_data->args.seq_args,
                        &d_data->res.seq_res,
@@ -6452,34 +6466,34 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
                if (data->arg.new_lock && !data->cancelled) {
                        data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
                        if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
-                               break;
+                               goto out_restart;
                }
-
                if (data->arg.new_lock_owner != 0) {
                        nfs_confirm_seqid(&lsp->ls_seqid, 0);
                        nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
                        set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-                       goto out_done;
-               } else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
-                       goto out_done;
-
+               } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+                       goto out_restart;
                break;
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_EXPIRED:
                if (data->arg.new_lock_owner != 0) {
-                       if (nfs4_stateid_match(&data->arg.open_stateid,
+                       if (!nfs4_stateid_match(&data->arg.open_stateid,
                                                &lsp->ls_state->open_stateid))
-                               goto out_done;
-               } else if (nfs4_stateid_match(&data->arg.lock_stateid,
+                               goto out_restart;
+               } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
                                                &lsp->ls_stateid))
-                               goto out_done;
+                               goto out_restart;
        }
-       if (!data->cancelled)
-               rpc_restart_call_prepare(task);
 out_done:
        dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
+       return;
+out_restart:
+       if (!data->cancelled)
+               rpc_restart_call_prepare(task);
+       goto out_done;
 }
 
 static void nfs4_lock_release(void *calldata)
@@ -6488,7 +6502,7 @@ static void nfs4_lock_release(void *calldata)
 
        dprintk("%s: begin!\n", __func__);
        nfs_free_seqid(data->arg.open_seqid);
-       if (data->cancelled) {
+       if (data->cancelled && data->rpc_status == 0) {
                struct rpc_task *task;
                task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
                                data->arg.lock_seqid);
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 
        dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
+
        switch (nfs4err) {
        case 0:
                goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                goto out;
        }
 
-       nfs4_sequence_free_slot(&lgp->res.seq_res);
        err = nfs4_handle_exception(server, nfs4err, exception);
        if (!status) {
                if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        if (IS_ERR(task))
                return ERR_CAST(task);
        status = rpc_wait_for_completion_task(task);
-       if (status == 0) {
+       if (status != 0)
+               goto out;
+
+       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+       if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
                status = nfs4_layoutget_handle_exception(task, lgp, &exception);
                *timeout = exception.timeout;
-       }
-
+       } else
+               lseg = pnfs_layout_process(lgp);
+out:
        trace_nfs4_layoutget(lgp->args.ctx,
                        &lgp->args.range,
                        &lgp->res.range,
                        &lgp->res.stateid,
                        status);
 
-       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
-       if (status == 0 && lgp->res.layoutp->len)
-               lseg = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
                        &lrp->args.seq_args,
                        &lrp->res.seq_res,
                        task);
+       if (!pnfs_layout_is_valid(lrp->args.layout))
+               rpc_exit(task, 0);
 }
 
 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
index a8f5e6b167491e3746a921f1f611fbb06b7d5f45..3fe81424337d07b5b19ab77d08825fb27bf523b0 100644 (file)
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
 {
 }
 
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+       return false;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
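
The stub exists so that the new pnfs_layout_is_valid() checks in nfs4proc.c compile and constant-fold away when CONFIG_NFS_V4_1 is off, with no #ifdef at the call sites. The generic idiom, sketched with a hypothetical option:

    #ifdef CONFIG_FOO
    bool foo_is_valid(const struct foo *f);    /* real implementation */
    #else
    static inline bool foo_is_valid(const struct foo *f)
    {
        return false;    /* callers' branches become dead code */
    }
    #endif
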
index b0555d7d8200f237bd2feafa58bd5303494bd3b2..55a099e47ba2773e94e126285efc937391dee5d4 100644 (file)
@@ -763,7 +763,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
                goto out_nfserr;
        }
 
-       host_err = ima_file_check(file, may_flags, 0);
+       host_err = ima_file_check(file, may_flags);
        if (host_err) {
                fput(file);
                goto out_nfserr;
index d0e955b558ad84ce2d6f98f6ccb490185bc36cc2..d98e19239bb71eb0a19c3dbfe94fa122e4d75dec 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -724,27 +724,13 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
        return ksys_fchown(fd, user, group);
 }
 
-int open_check_o_direct(struct file *f)
-{
-       /* NB: we're sure to have correct a_ops only after f_op->open */
-       if (f->f_flags & O_DIRECT) {
-               if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
 static int do_dentry_open(struct file *f,
                          struct inode *inode,
-                         int (*open)(struct inode *, struct file *),
-                         const struct cred *cred)
+                         int (*open)(struct inode *, struct file *))
 {
        static const struct file_operations empty_fops = {};
        int error;
 
-       f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
-                               FMODE_PREAD | FMODE_PWRITE;
-
        path_get(&f->f_path);
        f->f_inode = inode;
        f->f_mapping = inode->i_mapping;
@@ -753,7 +739,7 @@ static int do_dentry_open(struct file *f,
        f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
 
        if (unlikely(f->f_flags & O_PATH)) {
-               f->f_mode = FMODE_PATH;
+               f->f_mode = FMODE_PATH | FMODE_OPENED;
                f->f_op = &empty_fops;
                return 0;
        }
@@ -780,7 +766,7 @@ static int do_dentry_open(struct file *f,
                goto cleanup_all;
        }
 
-       error = security_file_open(f, cred);
+       error = security_file_open(f);
        if (error)
                goto cleanup_all;
 
@@ -788,6 +774,8 @@ static int do_dentry_open(struct file *f,
        if (error)
                goto cleanup_all;
 
+       /* normally all 3 are set; ->open() can clear them if needed */
+       f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
        if (!open)
                open = f->f_op->open;
        if (open) {
@@ -795,6 +783,7 @@ static int do_dentry_open(struct file *f,
                if (error)
                        goto cleanup_all;
        }
+       f->f_mode |= FMODE_OPENED;
        if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(inode);
        if ((f->f_mode & FMODE_READ) &&
@@ -809,9 +798,16 @@ static int do_dentry_open(struct file *f,
 
        file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
 
+       /* NB: we're sure to have correct a_ops only after f_op->open */
+       if (f->f_flags & O_DIRECT) {
+               if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
+                       return -EINVAL;
+       }
        return 0;
 
 cleanup_all:
+       if (WARN_ON_ONCE(error > 0))
+               error = -EINVAL;
        fops_put(f->f_op);
        if (f->f_mode & FMODE_WRITER) {
                put_write_access(inode);
@@ -847,19 +843,12 @@ cleanup_file:
  * Returns zero on success or -errno if the open failed.
  */
 int finish_open(struct file *file, struct dentry *dentry,
-               int (*open)(struct inode *, struct file *),
-               int *opened)
+               int (*open)(struct inode *, struct file *))
 {
-       int error;
-       BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
+       BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
 
        file->f_path.dentry = dentry;
-       error = do_dentry_open(file, d_backing_inode(dentry), open,
-                              current_cred());
-       if (!error)
-               *opened |= FILE_OPENED;
-
-       return error;
+       return do_dentry_open(file, d_backing_inode(dentry), open);
 }
 EXPORT_SYMBOL(finish_open);
 
@@ -874,13 +863,13 @@ EXPORT_SYMBOL(finish_open);
  * NB: unlike finish_open() this function does consume the dentry reference and
  * the caller need not dput() it.
  *
- * Returns "1" which must be the return value of ->atomic_open() after having
+ * Returns "0" which must be the return value of ->atomic_open() after having
  * called this function.
  */
 int finish_no_open(struct file *file, struct dentry *dentry)
 {
        file->f_path.dentry = dentry;
-       return 1;
+       return 0;
 }
 EXPORT_SYMBOL(finish_no_open);
 
@@ -896,8 +885,6 @@ EXPORT_SYMBOL(file_path);
  * @file: newly allocated file with f_flag initialized
- * @cred: credentials to use
  */
-int vfs_open(const struct path *path, struct file *file,
-            const struct cred *cred)
+int vfs_open(const struct path *path, struct file *file)
 {
        struct dentry *dentry = d_real(path->dentry, NULL, file->f_flags, 0);
 
@@ -905,7 +893,7 @@ int vfs_open(const struct path *path, struct file *file,
                return PTR_ERR(dentry);
 
        file->f_path = *path;
-       return do_dentry_open(file, d_backing_inode(dentry), NULL, cred);
+       return do_dentry_open(file, d_backing_inode(dentry), NULL);
 }
 
 struct file *dentry_open(const struct path *path, int flags,
@@ -919,19 +907,11 @@ struct file *dentry_open(const struct path *path, int flags,
        /* We must always pass in a valid mount pointer. */
        BUG_ON(!path->mnt);
 
-       f = get_empty_filp();
+       f = alloc_empty_file(flags, cred);
        if (!IS_ERR(f)) {
-               f->f_flags = flags;
-               error = vfs_open(path, f, cred);
-               if (!error) {
-                       /* from now on we need fput() to dispose of f */
-                       error = open_check_o_direct(f);
-                       if (error) {
-                               fput(f);
-                               f = ERR_PTR(error);
-                       }
-               } else { 
-                       put_filp(f);
+               error = vfs_open(path, f);
+               if (error) {
+                       fput(f);
                        f = ERR_PTR(error);
                }
        }
@@ -1063,26 +1043,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
 }
 EXPORT_SYMBOL(file_open_root);
 
-struct file *filp_clone_open(struct file *oldfile)
-{
-       struct file *file;
-       int retval;
-
-       file = get_empty_filp();
-       if (IS_ERR(file))
-               return file;
-
-       file->f_flags = oldfile->f_flags;
-       retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
-       if (retval) {
-               put_filp(file);
-               return ERR_PTR(retval);
-       }
-
-       return file;
-}
-EXPORT_SYMBOL(filp_clone_open);
-
 long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
 {
        struct open_flags op;
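
With FMODE_OPENED recorded in file->f_mode, fs/open.c drops the put_filp()/fput() split and the separate open_check_o_direct() pass: alloc_empty_file() returns a file that fput() can always dispose of, and once do_dentry_open() has set FMODE_OPENED, even a late failure (such as the relocated O_DIRECT a_ops check) can simply return an error and leave teardown to the caller's fput(). A sketch of the resulting caller contract, mirroring dentry_open() above:

    static struct file *example_open(const struct path *path, int flags,
                                     const struct cred *cred)
    {
        struct file *f = alloc_empty_file(flags, cred);
        int error;

        if (IS_ERR(f))
            return f;
        error = vfs_open(path, f);
        if (error) {
            fput(f);    /* safe at any stage: FMODE_OPENED tells
                         * fput() whether ->release() is due */
            return ERR_PTR(error);
        }
        return f;
    }
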
index bb0840e234f3bc176d2af120d6ed94ee3720aad0..bdc5d3c0977d09b37d5eb80abfb92c3a336f6c92 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        }
 }
 
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
-       struct pipe_inode_info *pipe = filp->private_data;
-
-       return &pipe->wait;
-}
-
 /* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
 {
+       __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
-       int nrbufs = pipe->nrbufs;
-       __poll_t mask = 0;
+       int nrbufs;
+
+       poll_wait(filp, &pipe->wait, wait);
 
        /* Reading only -- no need for acquiring the semaphore.  */
+       nrbufs = pipe->nrbufs;
+       mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -744,54 +741,33 @@ fail_inode:
 
 int create_pipe_files(struct file **res, int flags)
 {
-       int err;
        struct inode *inode = get_pipe_inode();
        struct file *f;
-       struct path path;
 
        if (!inode)
                return -ENFILE;
 
-       err = -ENOMEM;
-       path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
-       if (!path.dentry)
-               goto err_inode;
-       path.mnt = mntget(pipe_mnt);
-
-       d_instantiate(path.dentry, inode);
-
-       f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
+       f = alloc_file_pseudo(inode, pipe_mnt, "",
+                               O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
+                               &pipefifo_fops);
        if (IS_ERR(f)) {
-               err = PTR_ERR(f);
-               goto err_dentry;
+               free_pipe_info(inode->i_pipe);
+               iput(inode);
+               return PTR_ERR(f);
        }
 
-       f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
        f->private_data = inode->i_pipe;
 
-       res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
+       res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
+                                 &pipefifo_fops);
        if (IS_ERR(res[0])) {
-               err = PTR_ERR(res[0]);
-               goto err_file;
+               put_pipe_info(inode, inode->i_pipe);
+               fput(f);
+               return PTR_ERR(res[0]);
        }
-
-       path_get(&path);
        res[0]->private_data = inode->i_pipe;
-       res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
        res[1] = f;
        return 0;
-
-err_file:
-       put_filp(f);
-err_dentry:
-       free_pipe_info(inode->i_pipe);
-       path_put(&path);
-       return err;
-
-err_inode:
-       free_pipe_info(inode->i_pipe);
-       iput(inode);
-       return err;
 }
 
 static int __do_pipe_flags(int *fd, struct file **files, int flags)
@@ -1023,8 +999,7 @@ const struct file_operations pipefifo_fops = {
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
-       .get_poll_head  = pipe_get_poll_head,
-       .poll_mask      = pipe_poll_mask,
+       .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
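
This switches the pipe back from the ->get_poll_head()/->poll_mask() pair to a classic ->poll() instance, whose canonical shape is: register on the wait queue first, then snapshot state and compute the ready mask. A generic sketch of that shape (example_dev and example_data_ready() are hypothetical):

    struct example_dev {
        wait_queue_head_t waitq;
        /* ... device state ... */
    };

    static __poll_t example_poll(struct file *filp, poll_table *wait)
    {
        struct example_dev *dev = filp->private_data;
        __poll_t mask = 0;

        poll_wait(filp, &dev->waitq, wait);    /* register first */
        if (example_data_ready(dev))
            mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
    }
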
index b6572944efc340d89f136c5a9c17ac409c8bef00..aaffc0c302162db0fc9d682c071469f55326dc1d 100644 (file)
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        if (env_start != arg_end || env_start >= env_end)
                env_start = env_end = arg_end;
 
+       /* .. and limit it to a maximum of one page of slop */
+       if (env_end >= arg_end + PAGE_SIZE)
+               env_end = arg_end + PAGE_SIZE - 1;
+
        /* We're not going to care if "*ppos" has high bits set */
        pos = arg_start + *ppos;
 
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
+               long offset;
 
-               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
-               if (got <= 0)
+               /*
+                * Are we already starting past the official end?
+                * We always include the last byte that is *supposed*
+                * to be NUL
+                */
+               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+               if (got <= offset)
                        break;
+               got -= offset;
 
                /* Don't walk past a NUL character once you hit arg_end */
                if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                                n = arg_end - pos - 1;
 
                        /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, got-n);
-                       if (!got)
+                       got = n + strnlen(page+n, offset+got-n);
+                       if (got < offset)
                                break;
+                       got -= offset;
+
+                       /* Include the NUL if it existed */
+                       if (got < size)
+                               got++;
                }
 
-               got -= copy_to_user(buf, page, got);
+               got -= copy_to_user(buf, page+offset, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
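
The arithmetic above keeps every read anchored at arg_end - 1, the byte that is supposed to hold the trailing NUL: once pos moves past the official end, offset grows so that pos - offset is always arg_end - 1, and bytes fetched before the offset point don't count toward got. Together with the one-page clamp on env_end, this bounds how far a setproctitle()-style overwrite can drag the cmdline read into the environment area. A worked userspace check of the offset formula, with made-up addresses:

    #include <assert.h>

    static long slop_offset(unsigned long pos, unsigned long arg_end)
    {
        return (pos >= arg_end) ? (long)(pos - arg_end + 1) : 0;
    }

    int main(void)
    {
        assert(slop_offset(0x0fff, 0x1000) == 0);  /* inside argv */
        assert(slop_offset(0x1000, 0x1000) == 1);  /* read from 0x0fff */
        assert(slop_offset(0x1002, 0x1000) == 3);  /* still from 0x0fff */
        return 0;
    }
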
index 6ac1c92997ea2a20c3af8959c6920218f16a846d..bb1c1625b158d03f5c8685f55e370267e1cc76fb 100644 (file)
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, de->seq_ops);
 }
 
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+       struct proc_dir_entry *de = PDE(inode);
+
+       if (de->state_size)
+               return seq_release_private(inode, file);
+       return seq_release(inode, file);
+}
+
 static const struct file_operations proc_seq_fops = {
        .open           = proc_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
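
When de->state_size is set, proc_seq_open() goes through a seq_open_private() path (just above the visible context), so releasing every open with plain seq_release() leaked the per-open private allocation; the new hook restores the required pairing. The pairing in miniature (a sketch with hypothetical names):

    static int example_open(struct inode *inode, struct file *file)
    {
        /* allocates the seq_file plus a zeroed per-open state blob */
        return seq_open_private(file, &example_seq_ops,
                                sizeof(struct example_state));
    }

    static const struct file_operations example_fops = {
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,    /* frees the state blob too */
    };
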
index e9679016271fba923290c24e13f5368f5f0e0199..dfd73a4616ce565bfccb996a50f5eb549fe41fe8 100644 (file)
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
                SEQ_PUT_DEC(" kB\nSwapPss:        ",
                                                mss->swap_pss >> PSS_SHIFT);
-               SEQ_PUT_DEC(" kB\nLocked:         ", mss->pss >> PSS_SHIFT);
+               SEQ_PUT_DEC(" kB\nLocked:         ",
+                                               mss->pss_locked >> PSS_SHIFT);
                seq_puts(m, " kB\n");
        }
        if (!rollup_mode) {
index d88231e3b2be3ec1bc1f85c3c2fd92973e312c15..fc20e06c56ba55bf229db78cb5b5077c21935931 100644 (file)
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
 static unsigned long
 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct list_head *head;
        struct dquot *dquot;
        unsigned long freed = 0;
 
        spin_lock(&dq_list_lock);
-       head = free_dquots.prev;
-       while (head != &free_dquots && sc->nr_to_scan) {
-               dquot = list_entry(head, struct dquot, dq_free);
+       while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+               dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                do_destroy_dquot(dquot);
                sc->nr_to_scan--;
                freed++;
-               head = free_dquots.prev;
        }
        spin_unlock(&dq_list_lock);
        return freed;
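
The rewrite replaces the hand-maintained head pointer with the standard pop-from-the-head loop: because each pass frees the entry it just took, re-fetching list_first_entry() after a list_empty() check is both simpler and immune to stale-pointer slips. The bare idiom (a sketch over the kernel list API; destroy_item() is hypothetical):

    while (!list_empty(&head) && budget) {
        struct item *it = list_first_entry(&head, struct item, node);

        list_del(&it->node);
        destroy_item(it);
        budget--;
    }
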
index 7e288d97adcbb7504f2c3c2953ca24debd770b01..9fed1c05f1f4df6f750c599da1670abf4b066445 100644 (file)
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
 }
 
 /* %k */
-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
+static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
-                       le32_to_cpu(key->k_objectid), le_offset(key),
-                       le_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                le32_to_cpu(key->k_dir_id),
+                                le32_to_cpu(key->k_objectid), le_offset(key),
+                                le_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
 /* %K */
-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
+static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
-                       key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
-                       cpu_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                key->on_disk_key.k_dir_id,
+                                key->on_disk_key.k_objectid,
+                                reiserfs_cpu_offset(key), cpu_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
+static int scnprintf_de_head(char *buf, size_t size,
+                            struct reiserfs_de_head *deh)
 {
        if (deh)
-               sprintf(buf,
-                       "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
-                       deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
-                       deh_location(deh), deh_state(deh));
+               return scnprintf(buf, size,
+                                "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+                                deh_offset(deh), deh_dir_id(deh),
+                                deh_objectid(deh), deh_location(deh),
+                                deh_state(deh));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 
 }
 
-static void sprintf_item_head(char *buf, struct item_head *ih)
+static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
 {
        if (ih) {
-               strcpy(buf,
-                      (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
-               sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
-               sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
-                       "free_space(entry_count) %d",
-                       ih_item_len(ih), ih_location(ih), ih_free_space(ih));
+               char *p = buf;
+               char * const end = buf + size;
+
+               p += scnprintf(p, end - p, "%s",
+                              (ih_version(ih) == KEY_FORMAT_3_6) ?
+                              "*3.6* " : "*3.5*");
+
+               p += scnprintf_le_key(p, end - p, &ih->ih_key);
+
+               p += scnprintf(p, end - p,
+                              ", item_len %d, item_location %d, free_space(entry_count) %d",
+                              ih_item_len(ih), ih_location(ih),
+                              ih_free_space(ih));
+               return p - buf;
        } else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
+static int scnprintf_direntry(char *buf, size_t size,
+                             struct reiserfs_dir_entry *de)
 {
        char name[20];
 
        memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
        name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
-       sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+       return scnprintf(buf, size, "\"%s\"==>[%d %d]",
+                        name, de->de_dir_id, de->de_objectid);
 }
 
-static void sprintf_block_head(char *buf, struct buffer_head *bh)
+static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
-               B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+       return scnprintf(buf, size,
+                        "level=%d, nr_items=%d, free_space=%d rdkey ",
+                        B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
 }
 
-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
+static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf,
-               "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
-               bh->b_bdev, bh->b_size,
-               (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
-               bh->b_state, bh->b_page,
-               buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
-               buffer_dirty(bh) ? "DIRTY" : "CLEAN",
-               buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+       return scnprintf(buf, size,
+                        "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+                        bh->b_bdev, bh->b_size,
+                        (unsigned long long)bh->b_blocknr,
+                        atomic_read(&(bh->b_count)),
+                        bh->b_state, bh->b_page,
+                        buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+                        buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+                        buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
 }
 
-static void sprintf_disk_child(char *buf, struct disk_child *dc)
+static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
 {
-       sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
-               dc_size(dc));
+       return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
+                        dc_block_number(dc), dc_size(dc));
 }
 
 static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
        char *fmt1 = fmt_buf;
        char *k;
        char *p = error_buf;
+       char * const end = &error_buf[sizeof(error_buf)];
        int what;
 
        spin_lock(&error_lock);
 
-       strcpy(fmt1, fmt);
+       if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
+               strscpy(error_buf, "format string too long", end - error_buf);
+               goto out_unlock;
+       }
 
        while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
                *k = 0;
 
-               p += vsprintf(p, fmt1, args);
+               p += vscnprintf(p, end - p, fmt1, args);
 
                switch (what) {
                case 'k':
-                       sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
+                       p += scnprintf_le_key(p, end - p,
+                                             va_arg(args, struct reiserfs_key *));
                        break;
                case 'K':
-                       sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
+                       p += scnprintf_cpu_key(p, end - p,
+                                              va_arg(args, struct cpu_key *));
                        break;
                case 'h':
-                       sprintf_item_head(p, va_arg(args, struct item_head *));
+                       p += scnprintf_item_head(p, end - p,
+                                                va_arg(args, struct item_head *));
                        break;
                case 't':
-                       sprintf_direntry(p,
-                                        va_arg(args,
-                                               struct reiserfs_dir_entry *));
+                       p += scnprintf_direntry(p, end - p,
+                                               va_arg(args, struct reiserfs_dir_entry *));
                        break;
                case 'y':
-                       sprintf_disk_child(p,
-                                          va_arg(args, struct disk_child *));
+                       p += scnprintf_disk_child(p, end - p,
+                                                 va_arg(args, struct disk_child *));
                        break;
                case 'z':
-                       sprintf_block_head(p,
-                                          va_arg(args, struct buffer_head *));
+                       p += scnprintf_block_head(p, end - p,
+                                                 va_arg(args, struct buffer_head *));
                        break;
                case 'b':
-                       sprintf_buffer_head(p,
-                                           va_arg(args, struct buffer_head *));
+                       p += scnprintf_buffer_head(p, end - p,
+                                                  va_arg(args, struct buffer_head *));
                        break;
                case 'a':
-                       sprintf_de_head(p,
-                                       va_arg(args,
-                                              struct reiserfs_de_head *));
+                       p += scnprintf_de_head(p, end - p,
+                                              va_arg(args, struct reiserfs_de_head *));
                        break;
                }
 
-               p += strlen(p);
                fmt1 = k + 2;
        }
-       vsprintf(p, fmt1, args);
+       p += vscnprintf(p, end - p, fmt1, args);
+out_unlock:
        spin_unlock(&error_lock);
 
 }
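
A note on the scnprintf contract this conversion leans on: unlike sprintf, scnprintf returns the number of characters actually written (excluding the terminating NUL) and never writes more than the given size, so chaining "p += scnprintf(p, end - p, ...)" can never advance p past end. A minimal userspace sketch of that contract (my_scnprintf is an illustrative stand-in, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's scnprintf: returns the number of
 * characters actually placed in buf (never more than size - 1), so that
 * "p += my_scnprintf(p, end - p, ...)" can never step past end.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    int i;

    if (size == 0)
        return 0;
    va_start(args, fmt);
    i = vsnprintf(buf, size, fmt, args);
    va_end(args);
    if (i < 0)
        return 0;
    return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
    char buf[16];
    char *p = buf;
    char * const end = buf + sizeof(buf);

    p += my_scnprintf(p, end - p, "key %d", 12345);
    p += my_scnprintf(p, end - p, ", name %s", "much-too-long-tail");
    printf("%s\n", buf);    /* truncated, but in-bounds and NUL-terminated */
    return 0;
}

Had these helpers kept vsprintf-style return values (the full untruncated length), the same chaining would push p past end on truncation, which is exactly the overflow class the patch closes.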
index 317891ff8165ba19b775fcfaa8f6deccb58ba18f..4a6b6e4b21cb91aecdf40492c4763f09bf4ccc3f 100644 (file)
 
 #include <linux/uaccess.h>
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
-       if (file->f_op->poll) {
-               return file->f_op->poll(file, pt);
-       } else if (file_has_poll_mask(file)) {
-               unsigned int events = poll_requested_events(pt);
-               struct wait_queue_head *head;
-
-               if (pt && pt->_qproc) {
-                       head = file->f_op->get_poll_head(file, events);
-                       if (!head)
-                               return DEFAULT_POLLMASK;
-                       if (IS_ERR(head))
-                               return EPOLLERR;
-                       pt->_qproc(file, head, pt);
-               }
-
-               return file->f_op->poll_mask(file, events);
-       } else {
-               return DEFAULT_POLLMASK;
-       }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
 
 /*
  * Estimate expected accuracy in ns from a timeval.
index 2751476e6b6e85e094b33f717093ba0251af999e..f098b9f1c3963b77c2112d7f4342f58ccca4f192 100644 (file)
@@ -167,6 +167,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
        }
 
        if (compressed) {
+               if (!msblk->stream)
+                       goto read_failure;
                length = squashfs_decompress(msblk, bh, b, offset, length,
                        output);
                if (length < 0)
index 23813c078cc9527f547c345ba01ce31dafd570ab..0839efa720b3b562a9e56e677631fe9458ca9233 100644 (file)
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
 
        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
 
+       if (unlikely(length < 0))
+               return -EIO;
+
        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
index 13d80947bf9e6adac348878e3494b38cdd206099..f1c1430ae7213515214e7ea51941c894b83349d7 100644 (file)
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
                }
 
                for (i = 0; i < blocks; i++) {
-                       int size = le32_to_cpu(blist[i]);
+                       int size = squashfs_block_size(blist[i]);
+                       if (size < 0) {
+                               err = size;
+                               goto failure;
+                       }
                        block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
                }
                n -= blocks;
@@ -367,7 +371,24 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
                        sizeof(size));
        if (res < 0)
                return res;
-       return le32_to_cpu(size);
+       return squashfs_block_size(size);
+}
+
+void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+{
+       int copied;
+       void *pageaddr;
+
+       pageaddr = kmap_atomic(page);
+       copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
+       memset(pageaddr + copied, 0, PAGE_SIZE - copied);
+       kunmap_atomic(pageaddr);
+
+       flush_dcache_page(page);
+       if (copied == avail)
+               SetPageUptodate(page);
+       else
+               SetPageError(page);
 }
 
 /* Copy data into page cache  */
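
squashfs_fill_page() above centralizes the copy-and-zero pattern that previously appeared at each call site, and adds one behavioural fix: a short copy now marks the page with an error rather than claiming it up to date. A rough userspace analogue of the logic, with hypothetical sizes:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy what the cache has, zero the rest of the page, and only report
 * the page as up to date when the copy covered everything expected. */
static int fill_page(unsigned char *page, const unsigned char *src,
                     int copied, int expected)
{
    memcpy(page, src, copied);
    memset(page + copied, 0, PAGE_SIZE - copied);
    return copied == expected;  /* 1: SetPageUptodate, 0: SetPageError */
}

int main(void)
{
    unsigned char page[PAGE_SIZE], src[PAGE_SIZE] = { 1, 2, 3 };

    printf("uptodate=%d\n", fill_page(page, src, 100, PAGE_SIZE));
    printf("uptodate=%d\n", fill_page(page, src, PAGE_SIZE, PAGE_SIZE));
    return 0;
}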
@@ -376,7 +397,6 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 {
        struct inode *inode = page->mapping->host;
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-       void *pageaddr;
        int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
        int start_index = page->index & ~mask, end_index = start_index | mask;
 
@@ -402,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
                if (PageUptodate(push_page))
                        goto skip_page;
 
-               pageaddr = kmap_atomic(push_page);
-               squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-               kunmap_atomic(pageaddr);
-               flush_dcache_page(push_page);
-               SetPageUptodate(push_page);
+               squashfs_fill_page(push_page, buffer, offset, avail);
 skip_page:
                unlock_page(push_page);
                if (i != page->index)
@@ -416,10 +431,9 @@ skip_page:
 }
 
 /* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page)
+static int squashfs_readpage_fragment(struct page *page, int expected)
 {
        struct inode *inode = page->mapping->host;
-       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
                squashfs_i(inode)->fragment_block,
                squashfs_i(inode)->fragment_size);
@@ -430,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
                        squashfs_i(inode)->fragment_block,
                        squashfs_i(inode)->fragment_size);
        else
-               squashfs_copy_cache(page, buffer, i_size_read(inode) &
-                       (msblk->block_size - 1),
+               squashfs_copy_cache(page, buffer, expected,
                        squashfs_i(inode)->fragment_offset);
 
        squashfs_cache_put(buffer);
        return res;
 }
 
-static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+static int squashfs_readpage_sparse(struct page *page, int expected)
 {
-       struct inode *inode = page->mapping->host;
-       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-       int bytes = index == file_end ?
-                       (i_size_read(inode) & (msblk->block_size - 1)) :
-                        msblk->block_size;
-
-       squashfs_copy_cache(page, NULL, bytes, 0);
+       squashfs_copy_cache(page, NULL, expected, 0);
        return 0;
 }
 
@@ -456,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        int index = page->index >> (msblk->block_log - PAGE_SHIFT);
        int file_end = i_size_read(inode) >> msblk->block_log;
+       int expected = index == file_end ?
+                       (i_size_read(inode) & (msblk->block_size - 1)) :
+                        msblk->block_size;
        int res;
        void *pageaddr;
 
@@ -474,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
                        goto error_out;
 
                if (bsize == 0)
-                       res = squashfs_readpage_sparse(page, index, file_end);
+                       res = squashfs_readpage_sparse(page, expected);
                else
-                       res = squashfs_readpage_block(page, block, bsize);
+                       res = squashfs_readpage_block(page, block, bsize, expected);
        } else
-               res = squashfs_readpage_fragment(page);
+               res = squashfs_readpage_fragment(page, expected);
 
        if (!res)
                return 0;
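
The "expected" value hoisted into squashfs_readpage() is the byte count a block should decompress to: a full block everywhere except the file's final block, where "i_size & (block_size - 1)" (i_size modulo the power-of-two block size) gives the tail length. A small worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
    long long i_size = 1000000;   /* hypothetical file size in bytes */
    int block_log = 17;           /* 128 KiB blocks */
    int block_size = 1 << block_log;
    int file_end = (int)(i_size >> block_log);  /* index of the last block */

    for (int index = file_end - 1; index <= file_end; index++) {
        int expected = index == file_end ?
                (int)(i_size & (block_size - 1)) : block_size;
        printf("block %d: expect %d bytes\n", index, expected);
    }
    return 0;   /* prints 131072 for block 6, 82496 for tail block 7 */
}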
index f2310d2a2019556ea316b2c03386f3bbcf98c355..a9ba8d96776ac2661a98370612d02b53784577c7 100644 (file)
@@ -20,7 +20,7 @@
 #include "squashfs.h"
 
 /* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
 {
        struct inode *i = page->mapping->host;
        struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
                ERROR("Unable to read page, block %llx, size %x\n", block,
                        bsize);
        else
-               squashfs_copy_cache(page, buffer, buffer->length, 0);
+               squashfs_copy_cache(page, buffer, expected, 0);
 
        squashfs_cache_put(buffer);
        return res;
index cb485d8e0e91b1b2ff1cb9b0330339c51c15b8a4..80db1b86a27c66b1eb2105971685267171e52ce2 100644 (file)
 #include "page_actor.h"
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-       int pages, struct page **page);
+       int pages, struct page **page, int bytes);
 
 /* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+       int expected)
 
 {
        struct inode *inode = target_page->mapping->host;
@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
                 * using an intermediate buffer.
                 */
                res = squashfs_read_cache(target_page, block, bsize, pages,
-                                                               page);
+                                                       page, expected);
                if (res < 0)
                        goto mark_errored;
 
@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
        if (res < 0)
                goto mark_errored;
 
+       if (res != expected) {
+               res = -EIO;
+               goto mark_errored;
+       }
+
        /* Last page may have trailing bytes not filled */
        bytes = res % PAGE_SIZE;
        if (bytes) {
@@ -138,13 +144,12 @@ out:
 
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-       int pages, struct page **page)
+       int pages, struct page **page, int bytes)
 {
        struct inode *i = target_page->mapping->host;
        struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
                                                 block, bsize);
-       int bytes = buffer->length, res = buffer->error, n, offset = 0;
-       void *pageaddr;
+       int res = buffer->error, n, offset = 0;
 
        if (res) {
                ERROR("Unable to read page, block %llx, size %x\n", block,
@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
                if (page[n] == NULL)
                        continue;
 
-               pageaddr = kmap_atomic(page[n]);
-               squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-               kunmap_atomic(pageaddr);
-               flush_dcache_page(page[n]);
-               SetPageUptodate(page[n]);
+               squashfs_fill_page(page[n], buffer, offset, avail);
                unlock_page(page[n]);
                if (page[n] != target_page)
                        put_page(page[n]);
index 0ed6edbc5c7170aa06f191e33df193721206cb3f..0681feab4a8499562ccad42cd53b31f05d5776f3 100644 (file)
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
                                u64 *fragment_block)
 {
        struct squashfs_sb_info *msblk = sb->s_fs_info;
-       int block = SQUASHFS_FRAGMENT_INDEX(fragment);
-       int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-       u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+       int block, offset, size;
        struct squashfs_fragment_entry fragment_entry;
-       int size;
+       u64 start_block;
+
+       if (fragment >= msblk->fragments)
+               return -EIO;
+       block = SQUASHFS_FRAGMENT_INDEX(fragment);
+       offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+       start_block = le64_to_cpu(msblk->fragment_index[block]);
 
        size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
                                        &offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
                return size;
 
        *fragment_block = le64_to_cpu(fragment_entry.start_block);
-       size = le32_to_cpu(fragment_entry.size);
-
-       return size;
+       return squashfs_block_size(fragment_entry.size);
 }
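
The added "fragment >= msblk->fragments" guard is the standard validate-before-index pattern for untrusted on-disk values; without it, a corrupted inode could index past the in-memory fragment table. A minimal sketch of the guard (names are illustrative):

#include <errno.h>
#include <stdio.h>

/* Range-check an index read from disk before using it. */
static int frag_lookup(unsigned int fragment, unsigned int nr_fragments)
{
    if (fragment >= nr_fragments)
        return -EIO;
    /* safe to derive table block/offset from "fragment" now */
    return 0;
}

int main(void)
{
    printf("%d\n", frag_lookup(3, 10));   /* 0: in range */
    printf("%d\n", frag_lookup(10, 10));  /* -5: -EIO, off the end */
    return 0;
}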
 
 
index 887d6d270080a6d8d945868c5bc1265d3f38f93c..f89f8a74c6cea5c6fc8723319af336e7bf1ec04b 100644 (file)
@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
                                u64, u64, unsigned int);
 
 /* file.c */
+void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
                                int);
 
 /* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int);
+extern int squashfs_readpage_block(struct page *, u64, int, int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
index 24d12fd1417767689778302abf77b21f1efd6350..4e6853f084d071b6291da9891b8a16c730901e48 100644 (file)
 
 #define SQUASHFS_COMPRESSED_BLOCK(B)   (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
 
+static inline int squashfs_block_size(__le32 raw)
+{
+       u32 size = le32_to_cpu(raw);
+       return (size >> 25) ? -EIO : size;
+}
+
 /*
  * Inode number ops.  Inodes consist of a compressed block number, and an
  * uncompressed offset within that block
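
The new squashfs_block_size() helper bounds an on-disk length field before it is trusted. Assuming the layout implied by the surrounding macros, where bits 0-23 hold the size and bit 24 is the uncompressed flag, any value with bits 25 and up set can only come from a corrupted image. A userspace sketch of the check (endianness conversion dropped for brevity):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Length field: bits 0-23 size, bit 24 "uncompressed"; anything above
 * bit 24 means the image is corrupt.  le32_to_cpu() is omitted here. */
static int block_size_ok(uint32_t size)
{
    return (size >> 25) ? -EIO : (int)size;
}

int main(void)
{
    printf("%d\n", block_size_ok(0x100000));          /* 1 MiB block: ok */
    printf("%d\n", block_size_ok(0x1000000 | 4096));  /* uncompressed: ok */
    printf("%d\n", block_size_ok(0xdeadbeef));        /* corrupt: -EIO */
    return 0;
}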
index 1da565cb50c3d0f1e652671dd7a81577b598ca2f..ef69c31947bf86c06e138015c86d77e7c7727754 100644 (file)
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
        unsigned short                          block_log;
        long long                               bytes_used;
        unsigned int                            inodes;
+       unsigned int                            fragments;
        int                                     xattr_ids;
 };
 #endif
index 8a73b97217c8a5fe24f0e30047354fe058644018..40e657386fa52d9e0358ff45dd949dede645d4e0 100644 (file)
@@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
        msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
        msblk->inodes = le32_to_cpu(sblk->inodes);
+       msblk->fragments = le32_to_cpu(sblk->fragments);
        flags = le16_to_cpu(sblk->flags);
 
        TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
        TRACE("Block size %d\n", msblk->block_size);
        TRACE("Number of inodes %d\n", msblk->inodes);
-       TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+       TRACE("Number of fragments %d\n", msblk->fragments);
        TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
        TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
        TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -272,7 +273,7 @@ allocate_id_index_table:
        sb->s_export_op = &squashfs_export_ops;
 
 handle_fragments:
-       fragments = le32_to_cpu(sblk->fragments);
+       fragments = msblk->fragments;
        if (fragments == 0)
                goto check_directory_table;
 
index d84a2bee4f82b2f8470b7f2fbd42b2f33beb2bce..38c695ce385bb91fc0185cb92132b103a7b2c2d5 100644 (file)
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
        kfree_rcu(ctx, rcu);
        return 0;
 }
-       
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
-               __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
 {
        struct timerfd_ctx *ctx = file->private_data;
+       __poll_t events = 0;
+       unsigned long flags;
 
-       return &ctx->wqh;
-}
+       poll_wait(file, &ctx->wqh, wait);
 
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
-       struct timerfd_ctx *ctx = file->private_data;
+       spin_lock_irqsave(&ctx->wqh.lock, flags);
+       if (ctx->ticks)
+               events |= EPOLLIN;
+       spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
-       return ctx->ticks ? EPOLLIN : 0;
+       return events;
 }
 
 static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 
 static const struct file_operations timerfd_fops = {
        .release        = timerfd_release,
-       .get_poll_head  = timerfd_get_poll_head,
-       .poll_mask      = timerfd_poll_mask,
+       .poll           = timerfd_poll,
        .read           = timerfd_read,
        .llseek         = noop_llseek,
        .show_fdinfo    = timerfd_show,
@@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
 }
 
 SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
-               const struct itimerspec __user *, utmr,
-               struct itimerspec __user *, otmr)
+               const struct __kernel_itimerspec __user *, utmr,
+               struct __kernel_itimerspec __user *, otmr)
 {
        struct itimerspec64 new, old;
        int ret;
@@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
        return ret;
 }
 
-SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr)
 {
        struct itimerspec64 kotmr;
        int ret = do_timerfd_gettime(ufd, &kotmr);
@@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
        return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
                const struct compat_itimerspec __user *, utmr,
                struct compat_itimerspec __user *, otmr)
index 1b961b1d9699461cdf0771a90b4771078f6c95fc..fcda0fc97b90a14fd53aafbeb15885d85716e3a1 100644 (file)
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
-                       udf_delete_aext(table, epos, eloc,
-                                       (etype << 30) | elen);
+                       udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
-               udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+               udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);
 
        udf_add_free_space(sb, partition, -1);
index 0a98a2369738fc2cff925c80066b92a58b299066..d9523013096f978c9d4a3ca1d8fdd23b55eeb275 100644 (file)
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               fibh->ebh->b_data,
                               sizeof(struct fileIdentDesc) + fibh->soffset);
 
-                       fi_len = (sizeof(struct fileIdentDesc) +
-                                 cfi->lengthFileIdent +
-                                 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+                       fi_len = udf_dir_entry_len(cfi);
                        *nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
                        fibh->eoffset = fibh->soffset + fi_len;
                } else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               sizeof(struct fileIdentDesc));
                }
        }
+       /* Got last entry outside of dir size - fs is corrupted! */
+       if (*nf_pos > dir->i_size)
+               return NULL;
        return fi;
 }
 
index 7f39d17352c9697863f02140f7cf7ec1120a2215..9915a58fbabd7ff0194709ec883c1bd7003d72c7 100644 (file)
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
 
        if (startnum > endnum) {
                for (i = 0; i < (startnum - endnum); i++)
-                       udf_delete_aext(inode, *epos, laarr[i].extLocation,
-                                       laarr[i].extLength);
+                       udf_delete_aext(inode, *epos);
        } else if (startnum < endnum) {
                for (i = 0; i < (endnum - startnum); i++) {
                        udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
        return (nelen >> 30);
 }
 
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
-                      struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
 {
        struct extent_position oepos;
        int adsize;
        int8_t etype;
        struct allocExtDesc *aed;
        struct udf_inode_info *iinfo;
+       struct kernel_lb_addr eloc;
+       uint32_t elen;
 
        if (epos.bh) {
                get_bh(epos.bh);
index c586026508db82d0a27a1df1b964bcbf3fcec45c..58cc2414992b673296ff2d9408c62f3bd07b6f35 100644 (file)
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        loff_t f_pos;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
        int nfidlen;
-       uint8_t lfi;
-       uint16_t liu;
        udf_pblk_t block;
        struct kernel_lb_addr eloc;
        uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                namelen = 0;
        }
 
-       nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+       nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
 
        f_pos = udf_ext0_offset(dir);
 
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        goto out_err;
                }
 
-               liu = le16_to_cpu(cfi->lengthOfImpUse);
-               lfi = cfi->lengthFileIdent;
-
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (((sizeof(struct fileIdentDesc) +
-                                       liu + lfi + 3) & ~3) == nfidlen) {
+                       if (udf_dir_entry_len(cfi) == nfidlen) {
                                cfi->descTag.tagSerialNum = cpu_to_le16(1);
                                cfi->fileVersionNum = cpu_to_le16(1);
                                cfi->fileCharacteristics = 0;
@@ -608,8 +602,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (unlikely(!fi)) {
                inode_dec_link_count(inode);
-               unlock_new_inode(inode);
-               iput(inode);
+               discard_new_inode(inode);
                return err;
        }
        cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -700,8 +693,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
        if (!fi) {
                inode_dec_link_count(inode);
-               unlock_new_inode(inode);
-               iput(inode);
+               discard_new_inode(inode);
                goto out;
        }
        set_nlink(inode, 2);
@@ -719,8 +711,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (!fi) {
                clear_nlink(inode);
                mark_inode_dirty(inode);
-               unlock_new_inode(inode);
-               iput(inode);
+               discard_new_inode(inode);
                goto out;
        }
        cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -1047,8 +1038,7 @@ out:
 out_no_entry:
        up_write(&iinfo->i_data_sem);
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        goto out;
 }
 
@@ -1201,9 +1191,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (dir_fi) {
                dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
-               udf_update_tag((char *)dir_fi,
-                               (sizeof(struct fileIdentDesc) +
-                               le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+               udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
                if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                        mark_inode_dirty(old_inode);
                else
index bae311b59400459338d2c60f9e962429066e1483..84c47dde4d268a12e8f1aeaf2c6e6dda91db4972 100644 (file)
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
                        struct fileIdentDesc *, struct udf_fileident_bh *,
                        uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+       return ALIGN(sizeof(struct fileIdentDesc) +
+               le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+               UDF_NAME_PAD);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
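
The udf_dir_entry_len() helper above replaces the open-coded "(x + 3) & ~3" rounding at three call sites with ALIGN(). Assuming UDF_NAME_PAD is 4 (a power of two), the two forms are identical, as this small sketch (using a simplified ALIGN, not the kernel's typeof-based macro) shows:

#include <stdio.h>

/* Simplified kernel-style ALIGN(): round x up to power-of-two a. */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    for (unsigned int x = 37; x <= 41; x++)
        printf("%u -> ALIGN=%u  old-style=%u\n",
               x, ALIGN(x, 4U), (x + 3U) & ~3U);
    return 0;
}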
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
                        struct kernel_lb_addr *, uint32_t, int);
 extern void udf_write_aext(struct inode *, struct extent_position *,
                           struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
-                             struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
 extern int8_t udf_next_aext(struct inode *, struct extent_position *,
                            struct kernel_lb_addr *, uint32_t *, int);
 extern int8_t udf_current_aext(struct inode *, struct extent_position *,
index e1ef0f0a135352992ecff800ca94044673ad0ad2..02c0a4be421295c3d8ad28368eaba90ed4449e7c 100644 (file)
@@ -343,8 +343,7 @@ cg_found:
 fail_remove_inode:
        mutex_unlock(&sbi->s_lock);
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        UFSD("EXIT (FAILED): err %d\n", err);
        return ERR_PTR(err);
 failed:
index d5f43ba76c598dea592339f8926327401b181483..9ef40f100415cebf9ebeeade4467897cb62979bc 100644 (file)
@@ -43,8 +43,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
                return 0;
        }
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        return err;
 }
 
@@ -142,8 +141,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
 
 out_fail:
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput(inode);
+       discard_new_inode(inode);
        return err;
 }
 
@@ -198,8 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
-       unlock_new_inode(inode);
-       iput (inode);
+       discard_new_inode(inode);
 out_dir:
        inode_dec_link_count(dir);
        return err;
index 123bf7d516fc1f475cb89edb8aade4c2ad556f51..bad9cea37f12b6e5c95a4a0e4ed78796011d8be5 100644 (file)
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long reason)
 {
        struct mm_struct *mm = ctx->mm;
-       pte_t *pte;
+       pte_t *ptep, pte;
        bool ret = true;
 
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-       if (!pte)
+       ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+       if (!ptep)
                goto out;
 
        ret = false;
+       pte = huge_ptep_get(ptep);
 
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
-       if (huge_pte_none(*pte))
+       if (huge_pte_none(pte))
                ret = true;
-       if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+       if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
 out:
        return ret;
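
The userfaultfd change reads the huge pte exactly once via huge_ptep_get() and tests the local copy, so the none/write checks cannot observe two different values if the pte changes concurrently. The shape of the fix, as a simplified sketch (struct pte and the flag bits are stand-ins, not the real page-table types):

#include <stdio.h>

#define PTE_NONE_VAL  0UL
#define PTE_WRITE_BIT 0x2UL   /* stand-in flag bit */

struct pte { unsigned long val; };

/* Snapshot the pte once, then run every test on the local copy, so the
 * checks cannot see two different values under concurrent update. */
static int must_wait(const struct pte *ptep)
{
    struct pte pte = *ptep;   /* the huge_ptep_get() analogue */

    if (pte.val == PTE_NONE_VAL)          /* huge_pte_none() */
        return 1;
    if (!(pte.val & PTE_WRITE_BIT))       /* !huge_pte_write() */
        return 1;
    return 0;
}

int main(void)
{
    struct pte present_writable = { .val = 0x1UL | PTE_WRITE_BIT };
    printf("wait=%d\n", must_wait(&present_writable));   /* 0 */
    return 0;
}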
@@ -631,8 +633,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next)
-                       if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+                       if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+                               vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+                       }
                up_write(&mm->mmap_sem);
 
                userfaultfd_ctx_put(release_new_ctx);
index 84db76e0e3e3c58ae7d25b38a46a1ce2b4d5cae4..fecd187fcf2c3cd69bd79954ccc272737cb2ce2b 100644 (file)
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
        error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
        resv->ar_reserved = 0;
        resv->ar_asked = 0;
+       resv->ar_orig_reserved = 0;
 
        if (error)
                trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
-       xfs_extlen_t                    reserved;
+       xfs_extlen_t                    hidden_space;
 
        if (used > ask)
                ask = used;
-       reserved = ask - used;
 
-       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       switch (type) {
+       case XFS_AG_RESV_RMAPBT:
+               /*
+                * Space taken by the rmapbt is not subtracted from fdblocks
+                * because the rmapbt lives in the free space.  Here we must
+                * subtract the entire reservation from fdblocks so that we
+                * always have blocks available for rmapbt expansion.
+                */
+               hidden_space = ask;
+               break;
+       case XFS_AG_RESV_METADATA:
+               /*
+                * Space taken by all other metadata btrees are accounted
+                * on-disk as used space.  We therefore only hide the space
+                * that is reserved but not used by the trees.
+                */
+               hidden_space = ask - used;
+               break;
+       default:
+               ASSERT(0);
+               return -EINVAL;
+       }
+       error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
        if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
 
        resv = xfs_perag_resv(pag, type);
        resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+       resv->ar_orig_reserved = hidden_space;
+       resv->ar_reserved = ask - used;
 
        trace_xfs_ag_resv_init(pag, type, ask);
        return 0;
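
The split between hidden_space and ar_reserved can be summarized as: rmapbt blocks live in free space, so the whole ask must be hidden from fdblocks, while other metadata btrees are already counted as used space on disk, so only the reserved-but-unused remainder is hidden. A toy model of the accounting:

#include <stdio.h>

/* Toy model of the two reservation types: the rmapbt hides its whole
 * ask from fdblocks; other metadata btrees hide only ask - used. */
static long hidden_space(int is_rmapbt, long ask, long used)
{
    if (used > ask)
        ask = used;
    return is_rmapbt ? ask : ask - used;
}

int main(void)
{
    printf("rmapbt:   hide %ld blocks\n", hidden_space(1, 100, 30)); /* 100 */
    printf("metadata: hide %ld blocks\n", hidden_space(0, 100, 30)); /*  70 */
    return 0;
}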
index eef466260d43adb3cc9ef6ae3dcea36f82cd90a4..75dbdc14c45f08b733ab9d066dc86056ce8261f5 100644 (file)
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
        error = xfs_btree_get_rec(cur, &rec, stat);
        if (error || !(*stat))
                return error;
-       if (rec->alloc.ar_blockcount == 0)
-               goto out_bad_rec;
 
        *bno = be32_to_cpu(rec->alloc.ar_startblock);
        *len = be32_to_cpu(rec->alloc.ar_blockcount);
 
+       if (*len == 0)
+               goto out_bad_rec;
+
        /* check for valid extent range, including overflow */
        if (!xfs_verify_agbno(mp, agno, *bno))
                goto out_bad_rec;
index 01628f0c9a0c227543087c70bd7391ad3f0eee2c..7205268b30bc54b488bf513b1a2b6bb737769d64 100644 (file)
@@ -5780,6 +5780,32 @@ del_cursor:
        return error;
 }
 
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           off,
+       xfs_fileoff_t           shift)
+{
+       struct xfs_bmbt_irec    got;
+       int                     is_empty;
+       int                     error = 0;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+               return -EIO;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+       if (!error && !is_empty && got.br_startoff >= off &&
+           ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+               error = -EINVAL;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+       return error;
+}
+
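The wrap test in xfs_bmap_can_insert_extents() exploits the fixed width of the on-disk startoff field: if "(start + shift) & MASK" compares less than start, the shift overflowed the field. A userspace sketch, assuming the 54-bit BMBT_STARTOFF_BITLEN used by mainline:

#include <stdint.h>
#include <stdio.h>

#define STARTOFF_MASK  ((1ULL << 54) - 1)  /* assumed BMBT_STARTOFF_BITLEN */

/* If the masked sum is smaller than the start, the shift wrapped the
 * fixed-width on-disk field and must be rejected. */
static int shift_fits(uint64_t start, uint64_t shift)
{
    return ((start + shift) & STARTOFF_MASK) >= start;
}

int main(void)
{
    printf("%d\n", shift_fits(100, 1000));                 /* 1: fine */
    printf("%d\n", shift_fits(STARTOFF_MASK - 10, 100));   /* 0: wraps */
    return 0;
}
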
 int
 xfs_bmap_insert_extents(
        struct xfs_trans        *tp,
index 99dddbd0fcc6c606e59544d69a0435b0cc205c5f..9b49ddf99c4115479fe8271cc5b492a2d86b2b70 100644 (file)
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fsblock_t *firstblock,
                struct xfs_defer_ops *dfops);
+int    xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+               xfs_fileoff_t shift);
 int    xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
index 1c5a8aaf2bfcea6b51b76e7aa7dff4b55b4e4145..059bc44c27e83edf3cb1fe2c494490e65f93c5d8 100644 (file)
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
                XFS_DFORK_DSIZE(dip, mp) : \
                XFS_DFORK_ASIZE(dip, mp))
 
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+       (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
 /*
  * Return pointers to the data or attribute forks.
  */
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
 #define BMBT_STARTBLOCK_BITLEN 52
 #define BMBT_BLOCKCOUNT_BITLEN 21
 
+#define BMBT_STARTOFF_MASK     ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
 typedef struct xfs_bmbt_rec {
        __be64                  l0, l1;
 } xfs_bmbt_rec_t;
index d38d724534c48e2a4644be06acbf6d64da9a65b2..30d1d60f1d46e62ff71eca1f45b273536cc6cce1 100644 (file)
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
        }
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp,
+       int                     whichfork)
+{
+       uint32_t                di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+       switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+       case XFS_DINODE_FMT_LOCAL:
+               /*
+                * no local regular files yet
+                */
+               if (whichfork == XFS_DATA_FORK) {
+                       if (S_ISREG(be16_to_cpu(dip->di_mode)))
+                               return __this_address;
+                       if (be64_to_cpu(dip->di_size) >
+                                       XFS_DFORK_SIZE(dip, mp, whichfork))
+                               return __this_address;
+               }
+               if (di_nextents)
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               if (whichfork == XFS_ATTR_FORK) {
+                       if (di_nextents > MAXAEXTNUM)
+                               return __this_address;
+               } else if (di_nextents > MAXEXTNUM) {
+                       return __this_address;
+               }
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
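The XFS_DFORK_MAXEXT bound used above follows from the extents format itself: each extent is one 16-byte xfs_bmbt_rec stored inline in the fork, so a fork of a given size can hold at most size/16 records. A quick arithmetic sketch with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fork_size = 320;                           /* hypothetical fork bytes */
    uint32_t rec_size = 2 * (uint32_t)sizeof(uint64_t); /* xfs_bmbt_rec: two __be64 */
    uint32_t max_ext = fork_size / rec_size;            /* the XFS_DFORK_MAXEXT idea */

    printf("max extents: %u\n", (unsigned)max_ext);     /* 20 */
    printf("di_nextents=25 sane? %s\n", 25 <= max_ext ? "yes" : "no");
    return 0;
}
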
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
-               switch (dip->di_format) {
-               case XFS_DINODE_FMT_LOCAL:
-                       /*
-                        * no local regular files yet
-                        */
-                       if (S_ISREG(mode))
-                               return __this_address;
-                       if (di_size > XFS_DFORK_DSIZE(dip, mp))
-                               return __this_address;
-                       if (dip->di_nextents)
-                               return __this_address;
-                       /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+               if (fa)
+                       return fa;
                break;
        case 0:
                /* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
        }
 
        if (XFS_DFORK_Q(dip)) {
-               switch (dip->di_aformat) {
-               case XFS_DINODE_FMT_LOCAL:
-                       if (dip->di_anextents)
-                               return __this_address;
-               /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+               if (fa)
+                       return fa;
        } else {
                /*
                 * If there is no fork offset, this may be a freshly-made inode
@@ -713,7 +731,8 @@ xfs_inode_validate_extsize(
        if ((hint_flag || inherit_flag) && extsize == 0)
                return __this_address;
 
-       if (!(hint_flag || inherit_flag) && extsize != 0)
+       /* free inodes get flags set to zero but extsize remains */
+       if (mode && !(hint_flag || inherit_flag) && extsize != 0)
                return __this_address;
 
        if (extsize_bytes % blocksize_bytes)
@@ -759,7 +778,8 @@ xfs_inode_validate_cowextsize(
        if (hint_flag && cowextsize == 0)
                return __this_address;
 
-       if (!hint_flag && cowextsize != 0)
+       /* free inodes get flags set to zero but cowextsize remains */
+       if (mode && !hint_flag && cowextsize != 0)
                return __this_address;
 
        if (hint_flag && rt_flag)
index 65fc4ed2e9a1050b76b1cd85d874294e52a8afd9..b228c821bae6802c0aa8ab9b79069d703245bbe2 100644 (file)
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
        if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
            low_rec->ar_startext == high_rec->ar_startext)
                return 0;
-       if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
-               high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+       if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+               high_rec->ar_startext = mp->m_sb.sb_rextents;
 
        /* Iterate the bitmap, looking for discrepancies. */
        rtstart = low_rec->ar_startext;
index c35009a8669953dfee4013615ca62b47237b4d77..83b1e8c6c18f939e8afcabdb4eb37fd33e459da8 100644 (file)
@@ -685,12 +685,10 @@ out_unlock_iolock:
 }
 
 /*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode.  This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
  */
 int
 xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
 {
-       xfs_fileoff_t           remaining = length;
+       struct xfs_ifork        *ifp = &ip->i_df;
+       xfs_fileoff_t           end_fsb = start_fsb + length;
+       struct xfs_bmbt_irec    got, del;
+       struct xfs_iext_cursor  icur;
        int                     error = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       do {
-               int             done;
-               xfs_bmbt_irec_t imap;
-               int             nimaps = 1;
-               xfs_fsblock_t   firstblock;
-               struct xfs_defer_ops dfops;
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+               if (error)
+                       return error;
+       }
 
-               /*
-                * Map the range first and check that it is a delalloc extent
-                * before trying to unmap the range. Otherwise we will be
-                * trying to remove a real extent (which requires a
-                * transaction) or a hole, which is probably a bad idea...
-                */
-               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-                                      XFS_BMAPI_ENTIRE);
+       if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+               return 0;
 
-               if (error) {
-                       /* something screwed, just bail */
-                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-                               xfs_alert(ip->i_mount,
-                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
-                                               ip->i_ino, start_fsb);
-                       }
-                       break;
-               }
-               if (!nimaps) {
-                       /* nothing there */
-                       goto next_block;
-               }
-               if (imap.br_startblock != DELAYSTARTBLOCK) {
-                       /* been converted, ignore */
-                       goto next_block;
-               }
-               WARN_ON(imap.br_blockcount == 0);
+       while (got.br_startoff + got.br_blockcount > start_fsb) {
+               del = got;
+               xfs_trim_extent(&del, start_fsb, length);
 
                /*
-                * Note: while we initialise the firstblock/dfops pair, they
-                * should never be used because blocks should never be
-                * allocated or freed for a delalloc extent and hence we need
-                * don't cancel or finish them after the xfs_bunmapi() call.
+                * A delete can push the cursor forward. Step back to the
+                * previous extent for non-delalloc extents or for extents
+                * outside the target range.
                 */
-               xfs_defer_init(&dfops, &firstblock);
-               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-                                       &dfops, &done);
-               if (error)
-                       break;
+               if (!del.br_blockcount ||
+                   !isnullstartblock(del.br_startblock)) {
+                       if (!xfs_iext_prev_extent(ifp, &icur, &got))
+                               break;
+                       continue;
+               }
 
-               ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
-               start_fsb++;
-               remaining--;
-       } while(remaining > 0);
+               error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+                                                 &got, &del);
+               if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+                       break;
+       }
 
        return error;
 }
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       if (error)
+               return error;
+
+       /*
+        * If we zeroed right up to EOF and EOF straddles a page boundary we
+        * must make sure that the post-EOF area is also zeroed because the
+        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * Writeback of the eof page will do this, albeit clumsily.
+        */
+       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+               error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+       }
+
+       return error;
 }
 
 /*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
 
        trace_xfs_insert_file_space(ip);
 
+       error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+       if (error)
+               return error;
+
        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;
index c34fa9c342f25fdbee7e39fead0078e30859bba3..c7157bc48bd192ea60650577232ea87e8bfbbf02 100644 (file)
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
        struct xfs_trans                *tp,
        struct xfs_getfsmap_info        *info)
 {
-       struct xfs_rtalloc_rec          alow;
-       struct xfs_rtalloc_rec          ahigh;
+       struct xfs_rtalloc_rec          alow = { 0 };
+       struct xfs_rtalloc_rec          ahigh = { 0 };
        int                             error;
 
        xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
index a7afcad6b71140aed25f02979946cb9795afa644..3f2bd6032cf86525d6d344d60be903d9c739267c 100644 (file)
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
        do {
                free = percpu_counter_sum(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;
-               if (!free)
+               if (free <= 0)
                        break;
 
                delta = request - mp->m_resblks;
index 7a96c4e0ab5c621f38d9e034622d26ebd8d95437..5df4de666cc118848c86ddc33420d4147031ce57 100644 (file)
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
        struct xfs_inode        *cip;
        int                     nr_found;
        int                     clcount = 0;
-       int                     bufwasdelwri;
        int                     i;
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ cluster_corrupt_out:
         * inode buffer and shut down the filesystem.
         */
        rcu_read_unlock();
-       /*
-        * Clean up the buffer.  If it was delwri, just release it --
-        * brelse can handle it with no problems.  If not, shut down the
-        * filesystem before releasing the buffer.
-        */
-       bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
-       if (bufwasdelwri)
-               xfs_buf_relse(bp);
-
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 
-       if (!bufwasdelwri) {
-               /*
-                * Just like incore_relse: if we have b_iodone functions,
-                * mark the buffer as an error and call them.  Otherwise
-                * mark it as stale and brelse.
-                */
-               if (bp->b_iodone) {
-                       bp->b_flags &= ~XBF_DONE;
-                       xfs_buf_stale(bp);
-                       xfs_buf_ioerror(bp, -EIO);
-                       xfs_buf_ioend(bp);
-               } else {
-                       xfs_buf_stale(bp);
-                       xfs_buf_relse(bp);
-               }
-       }
-
        /*
-        * Unlocks the flush lock
+        * We'll always have an inode attached to the buffer for completion
+        * processing by the time we are called from xfs_iflush(). Hence we
+        * always need to do IO completion processing to abort the inodes
+        * attached to the buffer.  Handle them just like the shutdown case in
+        * xfs_buf_submit().
         */
+       ASSERT(bp->b_iodone);
+       bp->b_flags &= ~XBF_DONE;
+       xfs_buf_stale(bp);
+       xfs_buf_ioerror(bp, -EIO);
+       xfs_buf_ioend(bp);
+
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(cip, false);
        kmem_free(cilist);
        xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
                xfs_log_force(mp, 0);
 
        /*
-        * inode clustering:
-        * see if other inodes can be gathered into this write
+        * inode clustering: try to gather other inodes into this write
+        *
+        * Note: Any error during clustering will result in the filesystem
+        * being shut down and completion callbacks run on the cluster buffer.
+        * As we have already flushed and attached this inode to the buffer,
+        * it has already been aborted and released by xfs_iflush_cluster() and
+        * so we have no further error handling to do here.
         */
        error = xfs_iflush_cluster(ip, bp);
        if (error)
-               goto cluster_corrupt_out;
+               return error;
 
        *bpp = bp;
        return 0;
@@ -3500,12 +3489,8 @@ corrupt_out:
        if (bp)
                xfs_buf_relse(bp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
-       error = -EFSCORRUPTED;
 abort_out:
-       /*
-        * Unlocks the flush lock
-        */
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(ip, false);
        return error;
 }
index 49f5492eed3bdb9d85c53843df03546c83f2c799..55876dd02f0c8c75fa5653eeab82881bd3741928 100644 (file)
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
        unsigned                *lockmode)
 {
        unsigned                mode = XFS_ILOCK_SHARED;
+       bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
 
        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
-       if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+       if (xfs_is_reflink_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
                mode = XFS_ILOCK_EXCL;
        }
 
+relock:
        if (flags & IOMAP_NOWAIT) {
                if (!xfs_ilock_nowait(ip, mode))
                        return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
                xfs_ilock(ip, mode);
        }
 
+       /*
+        * The reflink iflag could have changed since the earlier unlocked
+        * check, so if we got ILOCK_SHARED for a write but we're now a
+        * reflink inode we have to switch to ILOCK_EXCL and relock.
+        */
+       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+               xfs_iunlock(ip, mode);
+               mode = XFS_ILOCK_EXCL;
+               goto relock;
+       }
+
        *lockmode = mode;
        return 0;
 }
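
The relock loop added to xfs_ilock_for_iomap() is the classic recheck-under-lock pattern: the reflink flag was sampled before the lock was taken, so it is re-tested once the lock is held and the lock is upgraded if the answer changed. A stripped-down sketch of the control flow (lock calls stubbed out):

#include <stdio.h>

enum lock_mode { SHARED, EXCL };

static int is_reflink;   /* can flip concurrently in the real code */

static enum lock_mode lock_for_write(void)
{
    enum lock_mode mode = is_reflink ? EXCL : SHARED;

    is_reflink = 1;      /* simulate the flag flipping after the check */
relock:
    /* xfs_ilock(ip, mode) would go here */
    if (mode == SHARED && is_reflink) {
        /* xfs_iunlock(ip, mode) would go here */
        mode = EXCL;
        goto relock;
    }
    return mode;
}

int main(void)
{
    printf("%s\n", lock_for_write() == EXCL ? "EXCL" : "SHARED");
    return 0;
}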
index 0fa29f39d658a7fcd8a8886689ab8e1b67a5db7e..3a75de7778434b53da42290819bb750dda5d6a0f 100644 (file)
@@ -1253,7 +1253,7 @@ xfs_setup_inode(
 
        inode_sb_list_add(inode);
        /* make the inode look hashed for the writeback code */
-       hlist_add_fake(&inode->i_hash);
+       inode_fake_hash(inode);
 
        inode->i_uid    = xfs_uid_to_kuid(ip->i_d.di_uid);
        inode->i_gid    = xfs_gid_to_kgid(ip->i_d.di_gid);
index e040af120b69b3a69b38517cde3092773b391260..524f543c5b820fe45de5866cd950509190a74612 100644 (file)
@@ -258,7 +258,12 @@ xfs_trans_alloc(
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
-       WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+       /*
+        * Zero-reservation ("empty") transactions can't modify anything, so
+        * they're allowed to run while we're frozen.
+        */
+       WARN_ON(resp->tr_logres > 0 &&
+               mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
        tp = kmem_zone_zalloc(xfs_trans_zone,
index 40a916efd7c039d2132014fcaf5ec780e4a8248e..1194a4c78d557fb411e9672291f6bab6e3623e9d 100644 (file)
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
 {
        return;
 }
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                                                                int event_flag)
 {
        static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                       "Consider compiling CPUfreq support into your kernel.\n");
                printout = 0;
        }
-       return 0;
 }
 static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 {
index ec07f23678ea6a507539b92d7df7577ccf54e0a0..0d4b1d3dbc1ee0c588e391dc75017664d48aa0ec 100644 (file)
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
 }
 #endif
 
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        kasan_check_write(v, sizeof(*v));
-       return __arch_atomic_add_unless(v, a, u);
+       return arch_atomic_fetch_add_unless(v, a, u);
 }
+#endif
 
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_add_unless(v, a, u);
+       return arch_atomic64_fetch_add_unless(v, a, u);
 }
+#endif
 
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
 static __always_inline void atomic_inc(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic_inc(v);
 }
+#endif
 
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
 static __always_inline void atomic64_inc(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic64_inc(v);
 }
+#endif
 
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
 static __always_inline void atomic_dec(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic_dec(v);
 }
+#endif
 
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
 static __always_inline void atomic64_dec(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic64_dec(v);
 }
+#endif
 
 static __always_inline void atomic_add(int i, atomic_t *v)
 {
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
        arch_atomic64_xor(i, v);
 }
 
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
 static __always_inline int atomic_inc_return(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_inc_return(v);
 }
+#endif
 
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
 static __always_inline s64 atomic64_inc_return(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_inc_return(v);
 }
+#endif
 
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
 static __always_inline int atomic_dec_return(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_dec_return(v);
 }
+#endif
 
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
 static __always_inline s64 atomic64_dec_return(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_dec_return(v);
 }
+#endif
 
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_inc_not_zero(v);
 }
+#endif
 
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_dec_if_positive(v);
 }
+#endif
 
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
 static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_dec_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
 static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_dec_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
 static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_inc_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
 static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_inc_and_test(v);
 }
+#endif
 
 static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
        return arch_atomic64_fetch_xor(i, v);
 }
 
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_sub_and_test(i, v);
 }
+#endif
 
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
 static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_sub_and_test(i, v);
 }
+#endif
 
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic_add_negative(i, v);
 }
+#endif
 
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
 static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_add_negative(i, v);
 }
+#endif
 
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-       kasan_check_write(ptr, size);
-       switch (size) {
-       case 1:
-               return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
-       case 2:
-               return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
-       case 4:
-               return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
-       case 8:
-               BUILD_BUG_ON(sizeof(unsigned long) != 8);
-               return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
-       }
-       BUILD_BUG();
-       return 0;
-}
+#define xchg(ptr, new)                                                 \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_xchg(__ai_ptr, (new));                                     \
+})
 
 #define cmpxchg(ptr, old, new)                                         \
 ({                                                                     \
-       ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old),  \
-               (unsigned long)(new), sizeof(*(ptr))));                 \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_cmpxchg(__ai_ptr, (old), (new));                           \
 })
 
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
-                 int size)
-{
-       kasan_check_write(ptr, size);
-       switch (size) {
-       case 1:
-               return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
-       case 2:
-               return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
-       case 4:
-               return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
-       case 8:
-               BUILD_BUG_ON(sizeof(unsigned long) != 8);
-               return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
-       }
-       BUILD_BUG();
-       return 0;
-}
-
 #define sync_cmpxchg(ptr, old, new)                                    \
 ({                                                                     \
-       ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr),                   \
-               (unsigned long)(old), (unsigned long)(new),             \
-               sizeof(*(ptr))));                                       \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_sync_cmpxchg(__ai_ptr, (old), (new));                      \
 })
 
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
-                  int size)
-{
-       kasan_check_write(ptr, size);
-       switch (size) {
-       case 1:
-               return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
-       case 2:
-               return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
-       case 4:
-               return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
-       case 8:
-               BUILD_BUG_ON(sizeof(unsigned long) != 8);
-               return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
-       }
-       BUILD_BUG();
-       return 0;
-}
-
 #define cmpxchg_local(ptr, old, new)                                   \
 ({                                                                     \
-       ((__typeof__(*(ptr)))cmpxchg_local_size((ptr),                  \
-               (unsigned long)(old), (unsigned long)(new),             \
-               sizeof(*(ptr))));                                       \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_cmpxchg_local(__ai_ptr, (old), (new));                     \
 })
 
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
-       kasan_check_write(ptr, sizeof(*ptr));
-       return arch_cmpxchg64(ptr, old, new);
-}
-
 #define cmpxchg64(ptr, old, new)                                       \
 ({                                                                     \
-       ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old),          \
-               (u64)(new)));                                           \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_cmpxchg64(__ai_ptr, (old), (new));                         \
 })
 
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
-       kasan_check_write(ptr, sizeof(*ptr));
-       return arch_cmpxchg64_local(ptr, old, new);
-}
-
 #define cmpxchg64_local(ptr, old, new)                                 \
 ({                                                                     \
-       ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old),    \
-               (u64)(new)));                                           \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
+       arch_cmpxchg64_local(__ai_ptr, (old), (new));                   \
 })
 
-/*
- * Originally we had the following code here:
- *     __typeof__(p1) ____p1 = (p1);
- *     kasan_check_write(____p1, 2 * sizeof(*____p1));
- *     arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
 #define cmpxchg_double(p1, p2, o1, o2, n1, n2)                         \
 ({                                                                     \
-       arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2));        \
+       typeof(p1) __ai_p1 = (p1);                                      \
+       kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));               \
+       arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2));     \
 })
 
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)                   \
-({                                                                     \
-       arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2));  \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)                           \
+({                                                                             \
+       typeof(p1) __ai_p1 = (p1);                                              \
+       kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));                       \
+       arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2));       \
 })
 
 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
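
Two idioms recur throughout this header. Each instrumented wrapper is guarded by #ifdef arch_<op> and #defines its own name so the generic fallbacks in <linux/atomic.h> (which test #ifndef <op>) stay disabled; and each of the rewritten macros copies its pointer argument into a local so the argument is evaluated exactly once before the KASAN check. A reduced sketch of the first idiom, with arch_foo/foo as placeholder names, not real kernel symbols:

    /*
     * If the architecture supplies arch_foo(), wrap it with the KASAN
     * check and #define the name to itself so the #ifndef fallback in
     * <linux/atomic.h> is not emitted on top of it.
     */
    #ifdef arch_foo
    #define foo foo
    static __always_inline void foo(int *p)
    {
            kasan_check_write(p, sizeof(*p));
            arch_foo(p);
    }
    #endif
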
index abe6dd9ca2a86578de4fa87b70d82bb79adc0b77..13324aa828eba12ade4e48b57d54aa956adfc183 100644 (file)
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
 
 #include <linux/irqflags.h>
 
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-       return atomic_add_return(i, v) < 0;
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
        atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
        atomic_sub_return(i, v);
 }
 
-static inline void atomic_inc(atomic_t *v)
-{
-       atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
-       atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
-
 #define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
 
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
-       c = atomic_read(v);
-       while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
-               c = old;
-       return c;
-}
-#endif
-
 #endif /* __ASM_GENERIC_ATOMIC_H */
index 8d28eb010d0d11a2f35f769f7ef51cd753a42a94..97b28b7f1f29a6ab34f8b81d030ff95ab23e4ffb 100644 (file)
@@ -11,6 +11,7 @@
  */
 #ifndef _ASM_GENERIC_ATOMIC64_H
 #define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
 
 typedef struct {
        long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
 #undef ATOMIC64_OP
 
 extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int      atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
index 04deffaf5f7d416f7b0892b206856bd9e9c849db..dd90c9792909d1db39c2a8a52cc78216a034f1ba 100644 (file)
 #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
 #define _ASM_GENERIC_BITOPS_ATOMIC_H_
 
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h>         /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-#  define ATOMIC_HASH_SIZE 4
-#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do {    \
-       arch_spinlock_t *s = ATOMIC_HASH(l);    \
-       local_irq_save(f);                      \
-       arch_spin_lock(s);                      \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       arch_spinlock_t *s = ATOMIC_HASH(l);            \
-       arch_spin_unlock(s);                            \
-       local_irq_restore(f);                           \
-} while(0)
-
-
-#else
-#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave().  So you can get NMI events occurring while a
- * *_bit function is holding a spin lock.  If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
  */
 
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
-       unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long flags;
-
-       _atomic_spin_lock_irqsave(p, flags);
-       *p  |= mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
+       p += BIT_WORD(nr);
+       atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
-       unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long flags;
-
-       _atomic_spin_lock_irqsave(p, flags);
-       *p &= ~mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
+       p += BIT_WORD(nr);
+       atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
-       unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long flags;
-
-       _atomic_spin_lock_irqsave(p, flags);
-       *p ^= mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
+       p += BIT_WORD(nr);
+       atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 {
+       long old;
        unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long old;
-       unsigned long flags;
 
-       _atomic_spin_lock_irqsave(p, flags);
-       old = *p;
-       *p = old | mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
+       p += BIT_WORD(nr);
+       if (READ_ONCE(*p) & mask)
+               return 1;
 
-       return (old & mask) != 0;
+       old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+       return !!(old & mask);
 }
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
+       long old;
        unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long old;
-       unsigned long flags;
 
-       _atomic_spin_lock_irqsave(p, flags);
-       old = *p;
-       *p = old & ~mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
+       p += BIT_WORD(nr);
+       if (!(READ_ONCE(*p) & mask))
+               return 0;
 
-       return (old & mask) != 0;
+       old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+       return !!(old & mask);
 }
 
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 {
+       long old;
        unsigned long mask = BIT_MASK(nr);
-       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-       unsigned long old;
-       unsigned long flags;
-
-       _atomic_spin_lock_irqsave(p, flags);
-       old = *p;
-       *p = old ^ mask;
-       _atomic_spin_unlock_irqrestore(p, flags);
 
-       return (old & mask) != 0;
+       p += BIT_WORD(nr);
+       old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+       return !!(old & mask);
 }
 
 #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
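
For reference, the BIT_WORD()/BIT_MASK() arithmetic all of these helpers rely on, worked through for one bit as a standalone example (not kernel code):

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
    #define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

    int main(void)
    {
            /* On a 64-bit long, bit 71 lives in word 1, mask 1UL << 7 = 0x80,
             * so set_bit(71, map) becomes atomic_long_or(0x80, map + 1). */
            printf("word=%d mask=%#lx\n", BIT_WORD(71), BIT_MASK(71));
            return 0;
    }
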
index 67ab280ad13401088a2fb3426e9867f03bd51d26..3ae021368f4854feaee22d8e486a1c60c64cc6d9 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef _ASM_GENERIC_BITOPS_LOCK_H_
 #define _ASM_GENERIC_BITOPS_LOCK_H_
 
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
 /**
  * test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-#define test_and_set_bit_lock(nr, addr)        test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+                                       volatile unsigned long *p)
+{
+       long old;
+       unsigned long mask = BIT_MASK(nr);
+
+       p += BIT_WORD(nr);
+       if (READ_ONCE(*p) & mask)
+               return 1;
+
+       old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+       return !!(old & mask);
+}
+
 
 /**
  * clear_bit_unlock - Clear a bit in memory, for unlock
  *
  * This operation is atomic and provides release barrier semantics.
  */
-#define clear_bit_unlock(nr, addr)     \
-do {                                   \
-       smp_mb__before_atomic();        \
-       clear_bit(nr, addr);            \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+       p += BIT_WORD(nr);
+       atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
 
 /**
  * __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do {                                        \
  *
  * See for example x86's implementation.
  */
-#define __clear_bit_unlock(nr, addr)   \
-do {                                   \
-       smp_mb__before_atomic();        \
-       clear_bit(nr, addr);            \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+                                     volatile unsigned long *p)
+{
+       unsigned long old;
 
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+       p += BIT_WORD(nr);
+       old = READ_ONCE(*p);
+       old &= ~BIT_MASK(nr);
+       atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                     byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+                                                    volatile unsigned long *p)
+{
+       long old;
+       unsigned long mask = BIT_MASK(nr);
+
+       p += BIT_WORD(nr);
+       old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+       return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
 
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
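
Together, test_and_set_bit_lock() and clear_bit_unlock() form a simple bit lock, as the comments suggest. An illustrative sketch of that usage (the bit number and busy-wait policy are made up):

    /* Illustrative only: bit 0 of a flags word used as a spinning bit
     * lock; a real user would pick a dedicated flag bit and bound the
     * spin. */
    static void my_bit_lock(volatile unsigned long *flags)
    {
            while (test_and_set_bit_lock(0, flags))
                    cpu_relax();            /* old value 1: someone holds it */
    }

    static void my_bit_unlock(volatile unsigned long *flags)
    {
            clear_bit_unlock(0, flags);     /* release semantics */
    }
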
index f59639afaa3908dbcaa06cec9defc8837cba09ee..b081794ba135730fb55aebdb4a709b9c9ea1f010 100644 (file)
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
 #else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 {
        return 0;
 }
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
        return 0;
 }
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
        return 0;
 }
index 0763f065b975a543fb0e887d4af8d63bf7354f05..d10f1e7d6ba8c37140ae9332b59399baae4ffdf1 100644 (file)
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializer
  */
-#define        __ARCH_SPIN_LOCK_UNLOCKED       { .val = ATOMIC_INIT(0) }
+#define        __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
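
The extra brace pair reflects the fact that 'val' sits inside a nested struct/union, so the fully-braced initializer needs one pair per nesting level; some compilers warn about the single-brace form. A reduced illustration (the layout shown is illustrative, not the real qspinlock):

    /* 'val' lives inside an anonymous union, so a fully-braced
     * initializer needs a brace pair for that level too. */
    struct qsl {
            union {
                    int val;
                    struct {
                            short locked;
                            short pending;
                    };
            };
    };

    static struct qsl lock = { { .val = 0 } };  /* no missing-braces warning */
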
index faddde44de8c902e6884e64eeb8b22bd0d11b75a..e811ef7b835006e232bad707f02fd5dfdc7e0691 100644 (file)
@@ -265,34 +265,52 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
  */
 
+#ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
+#endif
 
+#ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
+#endif
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
+/*
+ * Used to flush the TLB when page tables are removed, when lazy
+ * TLB mode may cause a CPU to retain intermediate translations
+ * pointing to about-to-be-freed page table memory.
+ */
+#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
+#define tlb_flush_remove_tables(mm) do {} while (0)
+#define tlb_flush_remove_tables_local(mm) do {} while (0)
+#endif
+
 #endif /* _ASM_GENERIC__TLB_H */
index cc414db9da0ad6758f696d0de2a251ce99d8d301..482461d8931d9186c4a11b7b2d9a24f981a595bc 100644 (file)
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                           unsigned int areqlen);
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
index 9564597cbfac59aa1837d4de80a2f1c18d64cd7f..0aa1d9c3e0b968479af93c764b438f9ae35bece3 100644 (file)
 #define IMX6UL_CLK_CSI_PODF            222
 #define IMX6UL_CLK_PLL3_120M           223
 #define IMX6UL_CLK_KPP                 224
-#define IMX6UL_CLK_CKO1_SEL            225
-#define IMX6UL_CLK_CKO1_PODF           226
-#define IMX6UL_CLK_CKO1                        227
-#define IMX6UL_CLK_CKO2_SEL            228
-#define IMX6UL_CLK_CKO2_PODF           229
-#define IMX6UL_CLK_CKO2                        230
-#define IMX6UL_CLK_CKO                 231
-
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED          232
-#define IMX6ULL_CLK_ESAI_PODF          233
-#define IMX6ULL_CLK_ESAI_EXTAL         234
-#define IMX6ULL_CLK_ESAI_MEM           235
-#define IMX6ULL_CLK_ESAI_IPG           236
-#define IMX6ULL_CLK_DCP_CLK            237
-#define IMX6ULL_CLK_EPDC_PRE_SEL       238
-#define IMX6ULL_CLK_EPDC_SEL           239
-#define IMX6ULL_CLK_EPDC_PODF          240
-#define IMX6ULL_CLK_EPDC_ACLK          241
-#define IMX6ULL_CLK_EPDC_PIX           242
-#define IMX6ULL_CLK_ESAI_SEL           243
+#define IMX6ULL_CLK_ESAI_PRED          225
+#define IMX6ULL_CLK_ESAI_PODF          226
+#define IMX6ULL_CLK_ESAI_EXTAL         227
+#define IMX6ULL_CLK_ESAI_MEM           228
+#define IMX6ULL_CLK_ESAI_IPG           229
+#define IMX6ULL_CLK_DCP_CLK            230
+#define IMX6ULL_CLK_EPDC_PRE_SEL       231
+#define IMX6ULL_CLK_EPDC_SEL           232
+#define IMX6ULL_CLK_EPDC_PODF          233
+#define IMX6ULL_CLK_EPDC_ACLK          234
+#define IMX6ULL_CLK_EPDC_PIX           235
+#define IMX6ULL_CLK_ESAI_SEL           236
+#define IMX6UL_CLK_CKO1_SEL            237
+#define IMX6UL_CLK_CKO1_PODF           238
+#define IMX6UL_CLK_CKO1                        239
+#define IMX6UL_CLK_CKO2_SEL            240
+#define IMX6UL_CLK_CKO2_PODF           241
+#define IMX6UL_CLK_CKO2                        242
+#define IMX6UL_CLK_CKO                 243
 #define IMX6UL_CLK_END                 244
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
index 4b35a66383f983f5594f3b71885145d6b1b101ef..e54f40974eb04ca516987ac3df89b0997b5ca0dd 100644 (file)
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
 
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level);
+
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
index 0c27515d2cf6db3683da2341a700283f82a99645..8124815eb1218b5653572fc4a04f5d4d734e3469 100644 (file)
@@ -214,6 +214,7 @@ struct atmphy_ops {
 struct atm_skb_data {
        struct atm_vcc  *vcc;           /* ATM VCC */
        unsigned long   atm_options;    /* ATM layer options */
+       unsigned int    acct_truesize;  /* truesize accounted to vcc */
 };
 
 #define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+       /*
+        * Because ATM skbs may not belong to a sock (and we don't
+        * necessarily want to), skb->truesize may be adjusted,
+        * escaping the hack in pskb_expand_head() which avoids
+        * doing so for some cases. So stash the value of truesize
+        * at the time we accounted it, and atm_pop_raw() can use
+        * that value later, in case it changes.
+        */
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       ATM_SKB(skb)->acct_truesize = skb->truesize;
+       ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
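
The stashed value is meant to be consumed on the release side, when the driver pops the skb; a hedged sketch of what that looks like (mirroring what atm_pop_raw() is expected to do, not necessarily verbatim):

    /* Sketch of the matching release: uncharge the value stashed at
     * accounting time rather than skb->truesize, which may have changed. */
    static void my_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
    {
            struct sock *sk = sk_atm(vcc);

            WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
                                          &sk->sk_wmem_alloc));
            dev_kfree_skb_any(skb);
    }
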
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
index 01ce3997cb4237b40fba2cbe8af1b69d6bc33fed..1e8e88bdaf09b8b8a015a08b264744956411e72c 100644 (file)
@@ -2,6 +2,8 @@
 /* Atomic operations usable in machine independent code */
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
 #include <asm/atomic.h>
 #include <asm/barrier.h>
 
  * barriers on top of the relaxed variant. In the case where the relaxed
  * variant is already fully ordered, no additional barriers are needed.
  *
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
  */
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence         smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence         smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence                smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence       smp_mb__after_atomic
+#endif
+
 #define __atomic_op_acquire(op, args...)                               \
 ({                                                                     \
        typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);         \
-       smp_mb__after_atomic();                                         \
+       __atomic_acquire_fence();                                       \
        __ret;                                                          \
 })
-#endif
 
-#ifndef __atomic_op_release
 #define __atomic_op_release(op, args...)                               \
 ({                                                                     \
-       smp_mb__before_atomic();                                        \
+       __atomic_release_fence();                                       \
        op##_relaxed(args);                                             \
 })
-#endif
 
-#ifndef __atomic_op_fence
 #define __atomic_op_fence(op, args...)                                 \
 ({                                                                     \
        typeof(op##_relaxed(args)) __ret;                               \
-       smp_mb__before_atomic();                                        \
+       __atomic_pre_full_fence();                                      \
        __ret = op##_relaxed(args);                                     \
-       smp_mb__after_atomic();                                         \
+       __atomic_post_full_fence();                                     \
        __ret;                                                          \
 })
-#endif
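
An architecture with a dedicated acquire barrier can now plug in at the fence level instead of redefining the whole __atomic_op_acquire() template; a hypothetical override ("acq.fence" is a made-up mnemonic):

    /* Hypothetical arch hook: supply only the acquire fence and let
     * __atomic_op_acquire() above build the ordered variants from the
     * relaxed ops. */
    #define __atomic_acquire_fence()                                    \
            __asm__ __volatile__("acq.fence" ::: "memory")
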
 
 /* atomic_add_return_relaxed */
 #ifndef atomic_add_return_relaxed
 #endif
 #endif /* atomic_add_return_relaxed */
 
+#ifndef atomic_inc
+#define atomic_inc(v)                  atomic_add(1, (v))
+#endif
+
 /* atomic_inc_return_relaxed */
 #ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v)   atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v)   atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v)   atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
 #define  atomic_inc_return_relaxed     atomic_inc_return
 #define  atomic_inc_return_acquire     atomic_inc_return
 #define  atomic_inc_return_release     atomic_inc_return
+#endif /* atomic_inc_return */
 
 #else /* atomic_inc_return_relaxed */
 
 #endif
 #endif /* atomic_sub_return_relaxed */
 
+#ifndef atomic_dec
+#define atomic_dec(v)                  atomic_sub(1, (v))
+#endif
+
 /* atomic_dec_return_relaxed */
 #ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v)   atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v)   atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v)   atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
 #define  atomic_dec_return_relaxed     atomic_dec_return
 #define  atomic_dec_return_acquire     atomic_dec_return
 #define  atomic_dec_return_release     atomic_dec_return
+#endif /* atomic_dec_return */
 
 #else /* atomic_dec_return_relaxed */
 
 #endif
 #endif /* atomic_fetch_and_relaxed */
 
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v)            atomic_and(~(int)(i), (v))
+#endif
+
 #ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire    atomic_fetch_andnot
-#define atomic_fetch_andnot_release    atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v)              atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v)      atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v)      atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v)      atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed            atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire            atomic_fetch_andnot
+#define atomic_fetch_andnot_release            atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
 
 #else /* atomic_fetch_andnot_relaxed */
 
        __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
 #endif
 #endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
 
 /* atomic_fetch_xor_relaxed */
 #ifndef atomic_fetch_xor_relaxed
 #endif
 #endif /* xchg_relaxed */
 
+/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#endif
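
A typical consumer of this fallback is the take-a-reference-unless-dead idiom; a brief sketch (my_get_ref is a made-up name):

    /* Sketch: take a reference only while the count is non-zero; an old
     * value of 0 means the object is already on its way to being freed. */
    static bool my_get_ref(atomic_t *refs)
    {
            return atomic_fetch_add_unless(refs, 1, 0) != 0;
    }
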
+
 /**
  * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 {
-       return __atomic_add_unless(v, a, u) != u;
+       return atomic_fetch_add_unless(v, a, u) != u;
 }
 
 /**
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
  */
 #ifndef atomic_inc_not_zero
 #define atomic_inc_not_zero(v)         atomic_add_unless((v), 1, 0)
 #endif
 
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
-       atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
-       return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
 {
-       return atomic_fetch_and_relaxed(~i, v);
+       return atomic_inc_return(v) == 0;
 }
+#endif
 
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
 {
-       return atomic_fetch_and_acquire(~i, v);
+       return atomic_dec_return(v) == 0;
 }
+#endif
 
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
-       return atomic_fetch_and_release(~i, v);
+       return atomic_sub_return(i, v) == 0;
 }
 #endif
 
 /**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
  * @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
  *
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
  */
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
 {
-       int val, c = hint;
-
-       /* sanity test, should be removed by compiler if hint is a constant */
-       if (!hint)
-               return atomic_inc_not_zero(v);
-
-       do {
-               val = atomic_cmpxchg(v, c, c + 1);
-               if (val == c)
-                       return 1;
-               c = val;
-       } while (c);
-
-       return 0;
+       return atomic_add_return(i, v) < 0;
 }
 #endif
 
 #ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
 {
-       int v, v1;
-       for (v = 0; v >= 0; v = v1) {
-               v1 = atomic_cmpxchg(p, v, v + 1);
-               if (likely(v1 == v))
-                       return 1;
-       }
-       return 0;
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+       return true;
 }
 #endif
 
 #ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
 {
-       int v, v1;
-       for (v = 0; v <= 0; v = v1) {
-               v1 = atomic_cmpxchg(p, v, v - 1);
-               if (likely(v1 == v))
-                       return 1;
-       }
-       return 0;
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+       return true;
 }
 #endif
 
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
 #ifndef atomic_dec_if_positive
 static inline int atomic_dec_if_positive(atomic_t *v)
 {
-       int c, old, dec;
-       c = atomic_read(v);
-       for (;;) {
+       int dec, c = atomic_read(v);
+
+       do {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
-               old = atomic_cmpxchg((v), c, dec);
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
+       } while (!atomic_try_cmpxchg(v, &c, dec));
+
        return dec;
 }
 #endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_add_return_relaxed */
 
+#ifndef atomic64_inc
+#define atomic64_inc(v)                        atomic64_add(1, (v))
+#endif
+
 /* atomic64_inc_return_relaxed */
 #ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
 #define  atomic64_inc_return_relaxed   atomic64_inc_return
 #define  atomic64_inc_return_acquire   atomic64_inc_return
 #define  atomic64_inc_return_release   atomic64_inc_return
+#endif /* atomic64_inc_return */
 
 #else /* atomic64_inc_return_relaxed */
 
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_sub_return_relaxed */
 
+#ifndef atomic64_dec
+#define atomic64_dec(v)                        atomic64_sub(1, (v))
+#endif
+
 /* atomic64_dec_return_relaxed */
 #ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
 #define  atomic64_dec_return_relaxed   atomic64_dec_return
 #define  atomic64_dec_return_acquire   atomic64_dec_return
 #define  atomic64_dec_return_release   atomic64_dec_return
+#endif /* atomic64_dec_return */
 
 #else /* atomic64_dec_return_relaxed */
 
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_fetch_and_relaxed */
 
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v)          atomic64_and(~(long long)(i), (v))
+#endif
+
 #ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire  atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release  atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v)            atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v)    atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v)    atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v)    atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed          atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire          atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release          atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
 
 #else /* atomic64_fetch_andnot_relaxed */
 
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
        __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
 #endif
 #endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
 
 /* atomic64_fetch_xor_relaxed */
 #ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define atomic64_try_cmpxchg_release   atomic64_try_cmpxchg
 #endif /* atomic64_try_cmpxchg */
 
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+                                                 long long u)
 {
-       atomic64_and(~i, v);
+       long long c = atomic64_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+       return c;
 }
+#endif
 
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-       return atomic64_fetch_and(~i, v);
+       return atomic64_fetch_add_unless(v, a, u) != u;
 }
 
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
-       return atomic64_fetch_and_relaxed(~i, v);
+       return atomic64_inc_return(v) == 0;
 }
+#endif
 
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
-       return atomic64_fetch_and_acquire(~i, v);
+       return atomic64_dec_return(v) == 0;
 }
+#endif
 
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+       return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
 {
-       return atomic64_fetch_and_release(~i, v);
+       return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+       long long c = atomic64_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+       long long c = atomic64_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic64 variable, v, was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long dec, c = atomic64_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+       return dec;
 }
 #endif
 
index 0bd432a4d7bd00ce376292720edd104d617c80c2..24251762c20c94edd238cfca1c1f55f0269d4e80 100644 (file)
@@ -22,7 +22,6 @@ struct dentry;
  */
 enum wb_state {
        WB_registered,          /* bdi_register() was done */
-       WB_shutting_down,       /* wb_shutdown() in progress */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
        WB_start_all,           /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
+       struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 #else
        struct bdi_writeback_congested *wb_congested;
 #endif
index 4cac4e1a72ffc8d81f5c2acc6d88cea0c32117b2..af419012d77de428dd7df26c958001f615ab8c3f 100644 (file)
@@ -2,29 +2,9 @@
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
 #include <asm/types.h>
+#include <linux/bits.h>
 
-#ifdef __KERNEL__
-#define BIT(nr)                        (1UL << (nr))
-#define BIT_ULL(nr)            (1ULL << (nr))
-#define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr)       (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr)       ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE          8
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
-       (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
-       (((~0ULL) - (1ULL << (l)) + 1) & \
-        (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644 (file)
index 0000000..2b7b532
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr)                        (1UL << (nr))
+#define BIT_ULL(nr)            (1ULL << (nr))
+#define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)       (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)       ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE          8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+       (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+       (((~0ULL) - (1ULL << (l)) + 1) & \
+        (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
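
Because GENMASK() is pure integer arithmetic, its behavior can be checked outside the kernel. A standalone userspace sketch that mirrors the macro, assuming a 64-bit unsigned long:

    #include <stdio.h>

    #define BITS_PER_LONG 64        /* assumption: LP64 target */
    #define GENMASK(h, l) \
            (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

    int main(void)
    {
            printf("GENMASK(7, 4)   = %#lx\n", GENMASK(7, 4));    /* 0xf0 */
            printf("GENMASK(39, 21) = %#lx\n", GENMASK(39, 21));  /* 0xffffe00000 */
            return 0;
    }
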
index e3147eb74222b868a014f498f1186a7a6c661804..ca3f2c2edd8573ac89e20447a12aff26bb599377 100644 (file)
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if the request state was successfully set to complete. If
+ * successful, the caller is responsible for seeing that this request is
+ * ended, as blk_mq_complete_request() will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+       return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+                       MQ_RQ_IN_FLIGHT;
+}
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
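
blk_mq_mark_complete() exists so that two completion paths can race safely: whichever caller wins the cmpxchg from MQ_RQ_IN_FLIGHT to MQ_RQ_COMPLETE owns the request from then on. A hedged sketch of the intended pattern in a driver timeout handler; the driver names are made up:

    static enum blk_eh_timer_return mydrv_timeout(struct request *rq, bool reserved)
    {
            /*
             * If the normal completion path got there first, the cmpxchg
             * fails and this handler must not touch the request again.
             */
            if (!blk_mq_mark_complete(rq))
                    return BLK_EH_DONE;

            /* We won the race: end the request with an error ourselves. */
            blk_mq_end_request(rq, BLK_STS_IOERR);
            return BLK_EH_DONE;
    }
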
index 9154570edf2963628f873d7404930450735ff41a..79226ca8f80f2db7f813cf63973c61288c1b78ab 100644 (file)
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
        if (!q->limits.chunk_sectors)
                return q->limits.max_sectors;
 
-       return q->limits.chunk_sectors -
-                       (offset & (q->limits.chunk_sectors - 1));
+       return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+                       (offset & (q->limits.chunk_sectors - 1))));
 }
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
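
The min() matters when a queue's chunk_sectors exceeds its max_sectors. Worked numbers, picked purely for illustration: with chunk_sectors = 256 and offset = 100, the distance to the next chunk boundary is 256 - (100 & 255) = 156 sectors; if max_sectors is 128, the old code still returned 156, while the patched code returns min(128, 156) = 128.
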
index 975fb4cf1bb743ccff5fae92e82df582533c0ff2..d50c2f0a655ae3f95271d5f8de40f8eabc917c65 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BPF_CGROUP_H
 #define _BPF_CGROUP_H
 
+#include <linux/errno.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/bpf.h>
 
@@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                                                              \
        __ret;                                                                \
 })
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr);
 #else
 
+struct bpf_prog;
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype,
+                                        struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                                       union bpf_attr __user *uattr)
+{
+       return -EINVAL;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
index 995c3b1e59bfa82ef3ad0504b090ab28a898f016..8827e797ff97d0973ddf1d4217a885cee9bb63ee 100644 (file)
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+                                          struct sk_buff *skb,
+                                          struct bpf_prog *xdp_prog)
+{
+       return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog);
 #else
 static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
 {
        return -EOPNOTSUPP;
 }
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                                     struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_XDP_SOCKETS)
index 5f8a4283092d0a6960fd663a33832221a9615353..9d9ff755ec2972cf6e46d1905e1a5caae9dd5ae6 100644 (file)
@@ -5,11 +5,12 @@
 #include <uapi/linux/bpf.h>
 
 #ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int lirc_prog_detach(const union bpf_attr *attr);
 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
 #else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+                                  struct bpf_prog *prog)
 {
        return -EINVAL;
 }
index 687b1760bb9f87755f50fe575d30988259248e19..f02cee0225d40afcbb40e8bfc15d49e4a252ecb1 100644 (file)
@@ -5,10 +5,10 @@
 #include <uapi/linux/bpfilter.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
                            unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
-                           int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+                           int __user *optlen);
 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
                                       char __user *optval,
                                       unsigned int optlen, bool is_set);
index 7dff1963c185c9d2c85da84f5e89f96cc5a2b3b5..308918928767ad5921b17b6fe16953dfdc0f6d93 100644 (file)
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
 extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
 
 extern u64
 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
index b1a5562b3215b71302422b7a727bbb2cf499d8f3..df45ee8413d6d20e0741df750a6fab8fa3196ece 100644 (file)
@@ -72,6 +72,9 @@
  */
 #ifndef COMPAT_SYSCALL_DEFINEx
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                                   \
+       __diag_push();                                                          \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                              \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));       \
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))        \
                __attribute__((alias(__stringify(__se_compat_sys##name))));     \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));  \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))   \
        {                                                                       \
-               return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               __MAP(x,__SC_TEST,__VA_ARGS__);                                 \
+               return ret;                                                     \
        }                                                                       \
+       __diag_pop();                                                           \
        static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* COMPAT_SYSCALL_DEFINEx */
 
@@ -109,11 +115,6 @@ typedef    compat_ulong_t          compat_aio_context_t;
 struct compat_sel_arg_struct;
 struct rusage;
 
-struct compat_itimerspec {
-       struct compat_timespec it_interval;
-       struct compat_timespec it_value;
-};
-
 struct compat_utimbuf {
        compat_time_t           actime;
        compat_time_t           modtime;
@@ -294,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
 extern int compat_put_timespec(const struct timespec *, void __user *);
 extern int compat_get_timeval(struct timeval *, const void __user *);
 extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
-                       const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
-                       struct compat_itimerspec __user *uits);
 
 struct compat_iovec {
        compat_uptr_t   iov_base;
index 31f2774f199464f9f7597f8603387f42dd3b1687..e70bfd1d2c3fe6f0a57e829911afa5af07faa1aa 100644 (file)
@@ -17,7 +17,16 @@ struct compat_timeval {
        s32             tv_usec;
 };
 
+struct compat_itimerspec {
+       struct compat_timespec it_interval;
+       struct compat_timespec it_value;
+};
+
 extern int compat_get_timespec64(struct timespec64 *, const void __user *);
 extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+                       const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+                       struct compat_itimerspec __user *uits);
 
 #endif /* _LINUX_COMPAT_TIME_H */
index f1a7492a5cc8cc59813734d1b258dbaf04bf76c8..573f5a7d42d4fc9d1cbeecd6deb019d8d6b4d983 100644 (file)
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline  __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
  * -Wunused-function.  This turns out to avoid the need for complex #ifdef
  * directives.  Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||               \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline          __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__  __attribute__((always_inline,unused)) notrace
-#define __inline __inline      __attribute__((always_inline,unused)) notrace
+#define inline \
+       inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline          __attribute__((unused)) notrace
-#define __inline__ __inline__  __attribute__((unused)) notrace
-#define __inline __inline      __attribute__((unused)) notrace
+#define inline inline          __attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline        inline __attribute__((always_inline))
 #define  noinline      __attribute__((noinline))
 
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+       __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore      ignored
+#define __diag_GCC_warn                warning
+#define __diag_GCC_error       error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)         #s
+#define __diag_str(s)          __diag_str1(s)
+#define __diag(s)              _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)                __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
index 6b79a9bba9a7630eb0b3a8fe35251d41717a2da0..a8ba6b04152c13c9ca2960898cd6ea4e89d37957 100644 (file)
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()  __diag(push)
+#define __diag_pop()   __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+       __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+       __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+       __diag_ ## compiler(version, error, option)
+
 #endif /* __LINUX_COMPILER_TYPES_H */
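
Taken together with the GCC definitions above, this lets code scope a warning override to a known-bogus diagnostic on a specific compiler release instead of disabling it build-wide. A hedged sketch of the call pattern, mirroring the COMPAT_SYSCALL_DEFINEx hunk earlier; the declarations in the middle are placeholders:

    __diag_push();
    /* Only GCC >= 8 understands this; on older compilers it expands to nothing. */
    __diag_ignore(GCC, 8, "-Wattribute-alias",
                  "Type aliasing is used to sanitize syscall arguments");

    /* ... declarations that would otherwise trigger the warning ... */

    __diag_pop();
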
index a97a63eef59f629bc54e0c7bc6be4d6c4b966bcf..3233fbe23594e8b6cc163fa9d2960677574fa32f 100644 (file)
@@ -30,7 +30,7 @@ struct cpu {
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 extern void cpu_init(void);
 extern void trap_init(void);
 
index 8796ba3871522e1d47b035ec792246f5cbbdf30c..4cf06a64bc02f2e954328fa01aad7eaeefc10e96 100644 (file)
@@ -164,6 +164,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+       CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,
        CPUHP_AP_ONLINE_DYN,
index 3855e3800f483e07cc4c16e68f6a1f2780de1b3e..deb0f663252fc55e39546c7d3107e96dfb3f03ae 100644 (file)
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size, pfn_t pfn);
index 66c6e17e61e5af907b1c8bec24021f22aa819824..0b83629a3d8f9491992771f654de9ca220b6800d 100644 (file)
@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
 extern void d_instantiate_new(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
 extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
index e6c0448ebcc7f3f9c10b0b4ccdfb593d256a3b88..31c865d1842e88671d7f29534587311c479fb3e7 100644 (file)
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
 
 static inline void delayacct_blkio_end(struct task_struct *p)
 {
-       if (current->delays)
+       if (p->delays)
                __delayacct_blkio_end(p);
        delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
index b67bf6ac907d8f324494efaf1d441b0ee7955a13..3c5a4cb3eb953174c688c4b965ba09d87925fdb3 100644 (file)
@@ -48,7 +48,7 @@
  *   CMA should not be used by the device drivers directly. It is
  *   only a helper framework for dma-mapping subsystem.
  *
- *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ *   For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
index 10b2654d549bd1e21b3a1e8d0ce0fd1003e4f2a8..a0aa00cc909d84397439613025bb8fcfee6cad49 100644 (file)
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
index 56add823f1909e28c9e46775f12303c48096a13a..401e4b254e30b06a9b19e826bd289c893c0cfe86 100644 (file)
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
        void *flush;
 } efi_file_handle_t;
 
+typedef struct {
+       u64 revision;
+       u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+       u64 revision;
+       u64 open_volume;
+} efi_file_io_interface_64_t;
+
 typedef struct _efi_file_io_interface {
        u64 revision;
        int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timespec64 *ts);
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
 #ifdef CONFIG_X86
-extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes,
                                             unsigned long size,
                                             bool nonblocking);
 extern void efi_find_mirror(void);
 #else
-static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
 static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
 
 extern int efi_tpm_eventlog_init(void);
 
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
 #endif /* _LINUX_EFI_H */
index 7094718b653b7b4ce1ebdc4d8ca37734b4284438..ffcc7724ca210097f70cca236d8e249d09541e64 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/err.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
index 279720db984af394c90737ed31b8ec09ef73c2ca..6b2fb032416ca2fcf285c199aeb43aa845f3ac00 100644 (file)
@@ -17,9 +17,12 @@ extern void fput(struct file *);
 struct file_operations;
 struct vfsmount;
 struct dentry;
+struct inode;
 struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
-       const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+       const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+       const struct file_operations *);
 
 static inline void fput_light(struct file *file, int fput_needed)
 {
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
index 45fc0f5000d8899ead3592cbdaa813d726e2c2af..c73dd7396886751938a0e2e1355d2aa28797ad87 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -469,15 +470,16 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-       unsigned int pages;
-       u8 image[];
+       u32 pages;
+       /* Some arches need word alignment for their instructions */
+       u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               locked:1,       /* Program image locked? */
+                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-       fp->locked = 1;
-       WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-       if (fp->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               fp->locked = 0;
-       }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+       fp->undo_set_mem = 1;
+       set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+       if (fp->undo_set_mem)
+               set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_rw((unsigned long)hdr, hdr->pages);
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+                                unsigned int pktlen)
+{
+       unsigned int len;
+
+       if (unlikely(!(fwd->flags & IFF_UP)))
+               return -ENETDOWN;
+
+       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+       if (pktlen > len)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
@@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC                BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
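
xdp_ok_fwd_dev() centralizes the two sanity checks every generic-XDP forwarding path needs: the target device must be up, and the frame must fit within its MTU plus link-layer headers. A hedged sketch of the expected call site; the wrapper name is made up:

    static int mydrv_try_fwd(struct net_device *fwd, struct sk_buff *skb)
    {
            int err;

            /* -ENETDOWN for a downed device, -EMSGSIZE for an oversized frame. */
            err = xdp_ok_fwd_dev(fwd, skb->len);
            if (err)
                    return err;

            /* ... hand the skb off toward fwd ... */
            return 0;
    }
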
index 5c91108846db20894ab70dafe43b7922fe08fb1f..1803347105967024c7678eb4225e360471620c30 100644 (file)
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 /* Has write method(s) */
 #define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)
 
+#define FMODE_OPENED           ((__force fmode_t)0x80000)
+#define FMODE_CREATED          ((__force fmode_t)0x100000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
@@ -684,6 +687,17 @@ static inline int inode_unhashed(struct inode *inode)
        return hlist_unhashed(&inode->i_hash);
 }
 
+/*
+ * __mark_inode_dirty expects inodes to be hashed.  Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put them on any lists.  hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+       hlist_add_fake(&inode->i_hash);
+}
+
 /*
  * inode->i_mutex nesting subclasses for the lock validator:
  *
@@ -1720,8 +1734,6 @@ struct file_operations {
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1778,7 +1790,7 @@ struct inode_operations {
        int (*update_time)(struct inode *, struct timespec64 *, int);
        int (*atomic_open)(struct inode *, struct dentry *,
                           struct file *, unsigned open_flag,
-                          umode_t create_mode, int *opened);
+                          umode_t create_mode);
        int (*tmpfile) (struct inode *, struct dentry *, umode_t);
        int (*set_acl)(struct inode *, struct posix_acl *, int);
 } ____cacheline_aligned;
@@ -2016,6 +2028,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
  * I_OVL_INUSE         Used by overlayfs to get exclusive ownership on upper
  *                     and work dirs among overlayfs mounts.
  *
+ * I_CREATING          New object's inode in the middle of setting up.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC           (1 << 0)
@@ -2036,7 +2050,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 #define __I_DIRTY_TIME_EXPIRED 12
 #define I_DIRTY_TIME_EXPIRED   (1 << __I_DIRTY_TIME_EXPIRED)
 #define I_WB_SWITCH            (1 << 13)
-#define I_OVL_INUSE                    (1 << 14)
+#define I_OVL_INUSE            (1 << 14)
+#define I_CREATING             (1 << 15)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2422,6 +2437,10 @@ extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
                                   const char *, int, umode_t);
 extern struct file * dentry_open(const struct path *, int, const struct cred *);
+static inline struct file *file_clone_open(struct file *file)
+{
+       return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
 extern int filp_close(struct file *, fl_owner_t id);
 
 extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2429,13 +2448,8 @@ extern struct filename *getname(const char __user *);
 extern struct filename *getname_kernel(const char *);
 extern void putname(struct filename *name);
 
-enum {
-       FILE_CREATED = 1,
-       FILE_OPENED = 2
-};
 extern int finish_open(struct file *file, struct dentry *dentry,
-                       int (*open)(struct inode *, struct file *),
-                       int *opened);
+                       int (*open)(struct inode *, struct file *));
 extern int finish_no_open(struct file *file, struct dentry *dentry);
 
 /* fs/ioctl.c */
@@ -2919,6 +2933,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
 static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
 #endif
 extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
 extern unsigned int get_next_ino(void);
 extern void evict_inodes(struct super_block *sb);
 
index 3efa3b861d44cae46670532c9db208d8630099a9..941b11811f85915bd70a730bbc338288d995493b 100644 (file)
@@ -16,6 +16,7 @@
 #define __FSL_GUTS_H__
 
 #include <linux/types.h>
+#include <linux/io.h>
 
 /**
  * Global Utility Registers.
index 8154f4920fcb9de96a24ec7b85d9b92f56968122..ebb77674be90cfff4466667c7bb62c121db5a235 100644 (file)
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
  */
 int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
 
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
 {
        return 0;
 }
-static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
index 41a3d5775394fed48e7b880317eaf6c1944c2817..773bcb1d4044ed2d83d4a1504f951951fa639d94 100644 (file)
@@ -511,6 +511,7 @@ struct hid_output_fifo {
 #define HID_STAT_ADDED         BIT(0)
 #define HID_STAT_PARSED                BIT(1)
 #define HID_STAT_DUP_DETECTED  BIT(2)
+#define HID_STAT_REPROBED      BIT(3)
 
 struct hid_input {
        struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device {                                                 /* device report descriptor */
        bool battery_avoid_query;
 #endif
 
-       unsigned int status;                                            /* see STAT flags above */
+       unsigned long status;                                           /* see STAT flags above */
        unsigned claimed;                                               /* Claimed by hidinput, hiddev? */
        unsigned quirks;                                                /* Various quirks the device can pull on us */
        bool io_started;                                                /* If IO has started */
index 7843b98e1c6ea7802dcea3f8b5a944d2355398d5..c20c7e197d0731e58b0f68b87531299080e8421a 100644 (file)
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
 
 static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
 {
-       return -1;
+       return -EINVAL;
 }
 
 static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
                                   struct bridge_vlan_info *p_vinfo)
 {
-       return -1;
+       return -EINVAL;
 }
 #endif
 
index f8231854b5d60316310fc5d8e57eea8625fe3078..119f53941c124c22452bf615f9ccca5a9130bb87 100644 (file)
@@ -109,6 +109,8 @@ struct ip_mc_list {
 extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                               unsigned int mode);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
 extern void ip_mc_drop_socket(struct sock *sk);
 extern int ip_mc_source(int add, int omode, struct sock *sk,
index 767467d886de4d53f5f5b862614b3f1644a5ecfa..67c75372b6915289e6d0876ac21368c89eb3896a 100644 (file)
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer);
 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
 
 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
index 0e4647e0eb60caf9fc5f22ef5ecf1888771a4d9e..d9ba3fc363b7cef4e7347bdb88d0224615887c02 100644 (file)
@@ -16,7 +16,7 @@ struct linux_binprm;
 
 #ifdef CONFIG_IMA
 extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
@@ -34,7 +34,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
        return 0;
 }
 
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
 {
        return 0;
 }
index d7188de4db968c14c5db1a44fca6421aec22d041..3f4bf60b0bb55c4d9d2708593d2439aec269f9c9 100644 (file)
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
        return axis == ABS_MT_SLOT || input_is_mt_value(axis);
 }
 
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
 void input_mt_report_finger_count(struct input_dev *dev, int count);
index 1df940196ab2bd0987a177930be126f0828fd733..ef169d67df9217a8bf9d1dad19be7920ae0352f2 100644 (file)
 #define ecap_srs(e)            ((e >> 31) & 0x1)
 #define ecap_ers(e)            ((e >> 30) & 0x1)
 #define ecap_prs(e)            ((e >> 29) & 0x1)
+#define ecap_broken_pasid(e)   ((e >> 28) & 0x1)
 #define ecap_dis(e)            ((e >> 27) & 0x1)
 #define ecap_nest(e)           ((e >> 26) & 0x1)
 #define ecap_mts(e)            ((e >> 25) & 0x1)
index 4bd2f34947f4a7647a485fe2e8092c1fd055f630..201de12a9957171003757967bb69161c3d060575 100644 (file)
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
index cbb872c1b607cec198f2bf374d8bd06690601a81..9d2ea3e907d0fc018d8f7e13d2912537c83c5e70 100644 (file)
@@ -73,6 +73,7 @@
 #define GICD_TYPER_MBIS                        (1U << 16)
 
 #define GICD_TYPER_ID_BITS(typer)      ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer)     ((((typer) >> 11) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)         ((((typer) & 0x1f) + 1) * 32)
 
 #define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
                phys_addr_t     phys_base;
        } __percpu              *rdist;
        struct page             *prop_page;
-       int                     id_bits;
        u64                     flags;
+       u32                     gicd_typer;
        bool                    has_vlpis;
        bool                    has_direct_lpi;
 };
index 25b33b66453773cb01509725fa68664c555ffd3f..dd1e40ddac7d8235e31aeb96fe460c77a70ac681 100644 (file)
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
        return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-       return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
index d231232385349146faf64c42cd05a73bddd0fcca..941dc0a5a877998e46d11541bdb491655a5f34af 100644 (file)
@@ -666,7 +666,7 @@ do {                                                                        \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
index 9440a2fc88937d463aa2990e345a08b156dd873b..e909413e4e38c6b95beed624581252de0be8ad18 100644 (file)
@@ -63,7 +63,6 @@ struct pt_regs;
 struct kretprobe;
 struct kretprobe_instance;
 typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
 typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
                                       unsigned long flags);
 typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
         */
        kprobe_fault_handler_t fault_handler;
 
-       /*
-        * ... called if breakpoint trap occurs in probe handler.
-        * Return 1 if it handled break, otherwise kernel will see it.
-        */
-       kprobe_break_handler_t break_handler;
-
        /* Saved opcode (which has been replaced with breakpoint) */
        kprobe_opcode_t opcode;
 
@@ -154,24 +147,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
        return p->flags & KPROBE_FLAG_FTRACE;
 }
 
-/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
-       struct kprobe kp;
-       void *entry;    /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler)  (handler)
-
 /*
  * Function-return probe -
  * Note:
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int register_kprobes(struct kprobe **kps, int num);
 void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
 int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
 static inline void unregister_kprobes(struct kprobe **kps, int num)
 {
 }
-static inline void jprobe_return(void)
-{
-}
 static inline int register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
        return -ENOSYS;
 }
 #endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
-       return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
-       return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
        return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
 {
        return enable_kprobe(&rp->kp);
 }
-static inline int disable_jprobe(struct jprobe *jp)
-{
-       return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
-       return -ENOSYS;
-}
 
 #ifndef CONFIG_KPROBES
 static inline bool is_kprobe_insn_slot(unsigned long addr)
index 2803264c512f8f6bf80dffc462c4a7ab079ce5f3..c1961761311dbfd5968d6ed64ea91ca3c7d25b0e 100644 (file)
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
index 5b9fddbaac4166b11121f75a3f3b4db7af1aac4c..b2bb44f87f5a3edb6ee6f179c83fcde42a363fe5 100644 (file)
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
 #define ktime_to_timeval(kt)           ns_to_timeval((kt))
 
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt)                        (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+       return kt;
+}
 
 /**
  * ktime_compare - Compares two ktime_t variables for less, greater or equal
index 8b8946dd63b9d4df3d08c5051604fce0fc147be1..32f247cb5e9ea0c107970d31f135c903dfd04c55 100644 (file)
@@ -210,6 +210,7 @@ enum {
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
                                            /* (doesn't imply presence) */
        ATA_FLAG_SATA           = (1 << 1),
+       ATA_FLAG_NO_LPM         = (1 << 2), /* host not happy with LPM */
        ATA_FLAG_NO_LOG_PAGE    = (1 << 5), /* do not issue log page read */
        ATA_FLAG_NO_ATAPI       = (1 << 6), /* No ATAPI support */
        ATA_FLAG_PIO_DMA        = (1 << 7), /* PIO cmds via DMA */
@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag)
        return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
 }
 
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)            \
+       for ((tag) = 0; (tag) < (max_tag) &&                    \
+            ({ qc = fn((ap), (tag)); 1; }); (tag)++)           \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag)                                       \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag)                                   \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag)                     \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
 /*
  * device helpers
  */
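
The new iterators hide the tag arithmetic drivers used to open-code. Note that ata_qc_from_tag() can return NULL for an inactive tag, so the loop body still checks qc; a sketch of the intended call pattern with a hypothetical driver helper:

    static int mydrv_count_active(struct ata_port *ap)
    {
            struct ata_queued_cmd *qc;
            unsigned int tag;
            int nr = 0;

            ata_qc_for_each(ap, qc, tag) {
                    /* ata_qc_from_tag() yields NULL for inactive tags. */
                    if (qc)
                            nr++;
            }
            return nr;
    }
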
index 8f1131c8dd54b1a6f5586a793eef839036004cfa..a8ee106b865d3887dc4623878b308d8b565ca3ae 100644 (file)
@@ -1569,7 +1569,7 @@ union security_list_options {
        int (*file_send_sigiotask)(struct task_struct *tsk,
                                        struct fown_struct *fown, int sig);
        int (*file_receive)(struct file *file);
-       int (*file_open)(struct file *file, const struct cred *cred);
+       int (*file_open)(struct file *file);
 
        int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
        void (*task_free)(struct task_struct *task);
index 4f5f8c21e2830bd3c7de20bd509b360f27e66816..1eb6f244588dae1efa08a2c9dcb8e46460592bcc 100644 (file)
@@ -27,6 +27,8 @@
  */
 #define MARVELL_PHY_ID_88E6390         0x01410f90
 
+#define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
+
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE     0x00000001
 #define MARVELL_PHY_M1118_DNS323_LEDS          0x00000002
index 31ca3e28b0ebe98369a1582430230a2f68c6baae..a6ddefc60517899167b55b53b0007ba3e3b9ed80 100644 (file)
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define        MEM_ONLINE              (1<<0) /* exposed to userspace */
index 80cbb7fdce4a1a9afea00cc7dbfbe16249871b2d..83957920653a0adeb08a90211f937e6227cb32a8 100644 (file)
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
        u32                     frag_sz_m1;
+       u32                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
        return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-                                struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+                                       u32 strides_offset,
+                                       struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
        fbc->log_sz     = log_sz;
        fbc->sz_m1      = (1 << fbc->log_sz) - 1;
        fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
        fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+       fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+                                struct mlx5_frag_buf_ctrl *fbc)
+{
+       mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
                                          u32 ix)
 {
-       unsigned int frag = (ix >> fbc->log_frag_strides);
+       unsigned int frag;
+
+       ix  += fbc->strides_offset;
+       frag = ix >> fbc->log_frag_strides;
 
        return fbc->frag_buf.frags[frag].buf +
                ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
index d3c9db492b30065750726992ba1001c48153232b..fab5121ffb8f5de2b5f39b6a0a7e43cca4b047e0 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/mlx5/driver.h>
 
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
 enum {
        SRIOV_NONE,
        SRIOV_LEGACY,
index 27134c4fcb76eb5140ff4828066e73e11d671cd9..ac281f5ec9b8077ba859f33eaf61e3f03ecdeb3d 100644 (file)
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         vnic_env_queue_counters[0x1];
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
-       u8         eswitch_flow_table[0x1];
+       u8         eswitch_manager[0x1];
        u8         device_memory[0x1];
        u8         mcam_reg[0x1];
        u8         pcam_reg[0x1];
index a0fbb9ffe3805276a16c485564de77047898a18e..68a5121694ef51e177b99a3e4dd26ca1ae5ffcc8 100644 (file)
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  * mmap() functions).
  */
 
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
@@ -450,6 +452,23 @@ struct vm_operations_struct {
                                          unsigned long addr);
 };
 
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+       static const struct vm_operations_struct dummy_vm_ops = {};
+
+       vma->vm_mm = mm;
+       vma->vm_ops = &dummy_vm_ops;
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+       vma->vm_ops = NULL;
+}
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
 struct mmu_gather;
 struct inode;
 
@@ -2132,7 +2151,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
 #endif
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 void zero_resv_unavail(void);
 #else
 static inline void zero_resv_unavail(void) {}
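
With vm_area_cachep no longer exported, all VMA lifetime management funnels through the three helpers above, and vma_init() guarantees a fresh VMA carries a dummy vm_ops rather than NULL unless vma_set_anonymous() is called deliberately. A hedged sketch of the allocation pattern this replaces; the wrapper is hypothetical, and the explicit vma_init() may be redundant if vm_area_alloc() already performs it:

    static struct vm_area_struct *mydrv_anon_vma(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 unsigned long len)
    {
            struct vm_area_struct *vma;

            vma = vm_area_alloc(mm);  /* replaces kmem_cache_zalloc(vm_area_cachep, ...) */
            if (!vma)
                    return NULL;

            vma_init(vma, mm);        /* mm backlink, dummy vm_ops, anon_vma_chain */
            vma->vm_start = addr;
            vma->vm_end = addr + len;
            vma_set_anonymous(vma);   /* anonymous mapping: vm_ops back to NULL */
            return vma;
    }
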
index 99ce070e7dcb4fb14af6897d2804b671eb0ff4e2..efdc24dd9e97b87dd17b37e118a6ae61f3933683 100644 (file)
@@ -335,176 +335,183 @@ struct core_state {
 
 struct kioctx_table;
 struct mm_struct {
-       struct vm_area_struct *mmap;            /* list of VMAs */
-       struct rb_root mm_rb;
-       u32 vmacache_seqnum;                   /* per-thread vmacache */
+       struct {
+               struct vm_area_struct *mmap;            /* list of VMAs */
+               struct rb_root mm_rb;
+               u32 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
-       unsigned long (*get_unmapped_area) (struct file *filp,
+               unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
 #endif
-       unsigned long mmap_base;                /* base of mmap area */
-       unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
+               unsigned long mmap_base;        /* base of mmap area */
+               unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-       /* Base adresses for compatible mmap() */
-       unsigned long mmap_compat_base;
-       unsigned long mmap_compat_legacy_base;
+               /* Base addresses for compatible mmap() */
+               unsigned long mmap_compat_base;
+               unsigned long mmap_compat_legacy_base;
 #endif
-       unsigned long task_size;                /* size of task vm space */
-       unsigned long highest_vm_end;           /* highest vma end address */
-       pgd_t * pgd;
-
-       /**
-        * @mm_users: The number of users including userspace.
-        *
-        * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
-        * to 0 (i.e. when the task exits and there are no other temporary
-        * reference holders), we also release a reference on @mm_count
-        * (which may then free the &struct mm_struct if @mm_count also
-        * drops to 0).
-        */
-       atomic_t mm_users;
-
-       /**
-        * @mm_count: The number of references to &struct mm_struct
-        * (@mm_users count as 1).
-        *
-        * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
-        * &struct mm_struct is freed.
-        */
-       atomic_t mm_count;
+               unsigned long task_size;        /* size of task vm space */
+               unsigned long highest_vm_end;   /* highest vma end address */
+               pgd_t * pgd;
+
+               /**
+                * @mm_users: The number of users including userspace.
+                *
+                * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+                * drops to 0 (i.e. when the task exits and there are no other
+                * temporary reference holders), we also release a reference on
+                * @mm_count (which may then free the &struct mm_struct if
+                * @mm_count also drops to 0).
+                */
+               atomic_t mm_users;
+
+               /**
+                * @mm_count: The number of references to &struct mm_struct
+                * (@mm_users count as 1).
+                *
+                * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+                * &struct mm_struct is freed.
+                */
+               atomic_t mm_count;
 
 #ifdef CONFIG_MMU
-       atomic_long_t pgtables_bytes;           /* PTE page table pages */
+               atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
-       int map_count;                          /* number of VMAs */
+               int map_count;                  /* number of VMAs */
 
-       spinlock_t page_table_lock;             /* Protects page tables and some counters */
-       struct rw_semaphore mmap_sem;
+               spinlock_t page_table_lock; /* Protects page tables and some
+                                            * counters
+                                            */
+               struct rw_semaphore mmap_sem;
 
-       struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
-                                                * together off init_mm.mmlist, and are protected
-                                                * by mmlist_lock
-                                                */
+               struct list_head mmlist; /* List of maybe swapped mm's. These
+                                         * are globally strung together off
+                                         * init_mm.mmlist, and are protected
+                                         * by mmlist_lock
+                                         */
 
 
-       unsigned long hiwater_rss;      /* High-watermark of RSS usage */
-       unsigned long hiwater_vm;       /* High-water virtual memory usage */
+               unsigned long hiwater_rss; /* High-watermark of RSS usage */
+               unsigned long hiwater_vm;  /* High-water virtual memory usage */
 
-       unsigned long total_vm;         /* Total pages mapped */
-       unsigned long locked_vm;        /* Pages that have PG_mlocked set */
-       unsigned long pinned_vm;        /* Refcount permanently increased */
-       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
-       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
-       unsigned long stack_vm;         /* VM_STACK */
-       unsigned long def_flags;
+               unsigned long total_vm;    /* Total pages mapped */
+               unsigned long locked_vm;   /* Pages that have PG_mlocked set */
+               unsigned long pinned_vm;   /* Refcount permanently increased */
+               unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+               unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+               unsigned long stack_vm;    /* VM_STACK */
+               unsigned long def_flags;
 
-       spinlock_t arg_lock; /* protect the below fields */
-       unsigned long start_code, end_code, start_data, end_data;
-       unsigned long start_brk, brk, start_stack;
-       unsigned long arg_start, arg_end, env_start, env_end;
+               spinlock_t arg_lock; /* protect the below fields */
+               unsigned long start_code, end_code, start_data, end_data;
+               unsigned long start_brk, brk, start_stack;
+               unsigned long arg_start, arg_end, env_start, env_end;
 
-       unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+               unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-       /*
-        * Special counters, in some configurations protected by the
-        * page_table_lock, in other configurations by being atomic.
-        */
-       struct mm_rss_stat rss_stat;
-
-       struct linux_binfmt *binfmt;
+               /*
+                * Special counters, in some configurations protected by the
+                * page_table_lock, in other configurations by being atomic.
+                */
+               struct mm_rss_stat rss_stat;
 
-       cpumask_var_t cpu_vm_mask_var;
+               struct linux_binfmt *binfmt;
 
-       /* Architecture-specific MM context */
-       mm_context_t context;
+               /* Architecture-specific MM context */
+               mm_context_t context;
 
-       unsigned long flags; /* Must use atomic bitops to access the bits */
+               unsigned long flags; /* Must use atomic bitops to access */
 
-       struct core_state *core_state; /* coredumping support */
+               struct core_state *core_state; /* coredumping support */
 #ifdef CONFIG_MEMBARRIER
-       atomic_t membarrier_state;
+               atomic_t membarrier_state;
 #endif
 #ifdef CONFIG_AIO
-       spinlock_t                      ioctx_lock;
-       struct kioctx_table __rcu       *ioctx_table;
+               spinlock_t                      ioctx_lock;
+               struct kioctx_table __rcu       *ioctx_table;
 #endif
 #ifdef CONFIG_MEMCG
-       /*
-        * "owner" points to a task that is regarded as the canonical
-        * user/owner of this mm. All of the following must be true in
-        * order for it to be changed:
-        *
-        * current == mm->owner
-        * current->mm != mm
-        * new_owner->mm == mm
-        * new_owner->alloc_lock is held
-        */
-       struct task_struct __rcu *owner;
+               /*
+                * "owner" points to a task that is regarded as the canonical
+                * user/owner of this mm. All of the following must be true in
+                * order for it to be changed:
+                *
+                * current == mm->owner
+                * current->mm != mm
+                * new_owner->mm == mm
+                * new_owner->alloc_lock is held
+                */
+               struct task_struct __rcu *owner;
 #endif
-       struct user_namespace *user_ns;
+               struct user_namespace *user_ns;
 
-       /* store ref to file /proc/<pid>/exe symlink points to */
-       struct file __rcu *exe_file;
+               /* store ref to file /proc/<pid>/exe symlink points to */
+               struct file __rcu *exe_file;
 #ifdef CONFIG_MMU_NOTIFIER
-       struct mmu_notifier_mm *mmu_notifier_mm;
+               struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-       pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       struct cpumask cpumask_allocation;
+               pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
 #ifdef CONFIG_NUMA_BALANCING
-       /*
-        * numa_next_scan is the next time that the PTEs will be marked
-        * pte_numa. NUMA hinting faults will gather statistics and migrate
-        * pages to new nodes if necessary.
-        */
-       unsigned long numa_next_scan;
+               /*
+                * numa_next_scan is the next time that the PTEs will be marked
+                * pte_numa. NUMA hinting faults will gather statistics and
+                * migrate pages to new nodes if necessary.
+                */
+               unsigned long numa_next_scan;
 
-       /* Restart point for scanning and setting pte_numa */
-       unsigned long numa_scan_offset;
+               /* Restart point for scanning and setting pte_numa */
+               unsigned long numa_scan_offset;
 
-       /* numa_scan_seq prevents two threads setting pte_numa */
-       int numa_scan_seq;
+               /* numa_scan_seq prevents two threads setting pte_numa */
+               int numa_scan_seq;
 #endif
-       /*
-        * An operation with batched TLB flushing is going on. Anything that
-        * can move process memory needs to flush the TLB when moving a
-        * PROT_NONE or PROT_NUMA mapped page.
-        */
-       atomic_t tlb_flush_pending;
+               /*
+                * An operation with batched TLB flushing is going on. Anything
+                * that can move process memory needs to flush the TLB when
+                * moving a PROT_NONE or PROT_NUMA mapped page.
+                */
+               atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-       /* See flush_tlb_batched_pending() */
-       bool tlb_flush_batched;
+               /* See flush_tlb_batched_pending() */
+               bool tlb_flush_batched;
 #endif
-       struct uprobes_state uprobes_state;
+               struct uprobes_state uprobes_state;
 #ifdef CONFIG_HUGETLB_PAGE
-       atomic_long_t hugetlb_usage;
+               atomic_long_t hugetlb_usage;
 #endif
-       struct work_struct async_put_work;
+               struct work_struct async_put_work;
 
 #if IS_ENABLED(CONFIG_HMM)
-       /* HMM needs to track a few things per mm */
-       struct hmm *hmm;
+               /* HMM needs to track a few things per mm */
+               struct hmm *hmm;
 #endif
-} __randomize_layout;
+       } __randomize_layout;
+
+       /*
+        * The mm_cpumask needs to be at the end of mm_struct, because it
+        * is dynamically sized based on nr_cpu_ids.
+        */
+       unsigned long cpu_bitmap[];
+};
 
 extern struct mm_struct init_mm;
 
+/* Pointer magic because the dynamic array size confuses some compilers. */
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
-       cpumask_clear(mm->cpu_vm_mask_var);
+       unsigned long cpu_bitmap = (unsigned long)mm;
+
+       cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+       cpumask_clear((struct cpumask *)cpu_bitmap);
 }
 
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 {
-       return mm->cpu_vm_mask_var;
+       return (struct cpumask *)&mm->cpu_bitmap;
 }
 
 struct mmu_gather;
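
The @mm_users/@mm_count comments above describe two-tier reference counting: @mm_users pins the address space itself, while @mm_count pins only &struct mm_struct, with all of @mm_users together holding a single @mm_count reference. A minimal sketch of the two patterns, in kernel context; the helpers are exactly the ones named in those comments, while the wrapper functions and error handling here are illustrative:

	/* Illustrative: pin only &struct mm_struct, not the address space. */
	static void inspect_mm(struct mm_struct *mm)
	{
		mmgrab(mm);			/* take an @mm_count reference */
		/* ... read mm fields; the page tables may already be gone ... */
		mmdrop(mm);			/* may free the struct at zero */
	}

	/* Illustrative: become a temporary user of the address space. */
	static int touch_address_space(struct mm_struct *mm)
	{
		if (!mmget_not_zero(mm))	/* fails once @mm_users hit zero */
			return -ESRCH;
		/* ... walk VMAs, handle faults, etc. ... */
		mmput(mm);	/* last @mm_users reference also drops @mm_count */
		return 0;
	}
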
index 2014bd19f28eff41ae37b80eba324644c537e291..96a71a648eed991530489ecea56b89d8755b395c 100644 (file)
@@ -501,6 +501,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_SKU,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
index 08b6eb964dd6865af3e1a7079a54b1e99f77e077..6554d3ba4396b3df49acac934ad16eeb71a695f4 100644 (file)
@@ -147,7 +147,6 @@ struct proto_ops {
        int             (*getname)   (struct socket *sock,
                                      struct sockaddr *addr,
                                      int peer);
-       __poll_t        (*poll_mask) (struct socket *sock, __poll_t events);
        __poll_t        (*poll)      (struct file *file, struct socket *sock,
                                      struct poll_table_struct *wait);
        int             (*ioctl)     (struct socket *sock, unsigned int cmd,
index 3ec9850c7936f01c0f7564dbe519e95ce0849639..3d0cc0b5cec2d7514dbebf32effab9b1e6388c3c 100644 (file)
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+}
 #else
 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 {
        NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+}
 #endif
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
index 9dee3c23895d82fae05025961fe83d15b23d45b7..712eed156d0912f1aecc97de222597f1d7cc5dc9 100644 (file)
@@ -1438,6 +1438,8 @@ enum {
        NFS_IOHDR_EOF,
        NFS_IOHDR_REDO,
        NFS_IOHDR_STAT,
+       NFS_IOHDR_RESEND_PNFS,
+       NFS_IOHDR_RESEND_MDS,
 };
 
 struct nfs_io_completion;
index b8d868d23e797b3cd80303c9c8f531300e4ee62b..08f9247e9827e0056eb4d82c5c83a73a19cebd11 100644 (file)
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int  softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
 static inline void touch_softlockup_watchdog_sched(void) { }
 static inline void touch_softlockup_watchdog(void) { }
 static inline void touch_softlockup_watchdog_sync(void) { }
 static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu     NULL
+#define lockup_detector_offline_cpu    NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
index 340029b2fb382cc15888d72fde0bf5a069a6467c..c133ccfa002e17362288eab00b11bc04895c6805 100644 (file)
@@ -368,7 +368,6 @@ struct pci_dev {
        unsigned int    transparent:1;          /* Subtractive decode bridge */
        unsigned int    multifunction:1;        /* Multi-function device */
 
-       unsigned int    is_added:1;
        unsigned int    is_busmaster:1;         /* Is busmaster */
        unsigned int    no_msi:1;               /* May not use MSI */
        unsigned int    no_64bit_msi:1;         /* May only use 32-bit MSIs */
@@ -1240,6 +1239,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
 unsigned long pci_address_to_pio(phys_addr_t addr);
 phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr);
 void pci_unmap_iospace(struct resource *res);
 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
                                      resource_size_t offset,
index 1fa12887ec020568eaca8ba8b0e334f4471c20f3..53c500f0ca795e8e6cb22245bdcf12f1b899b336 100644 (file)
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
 };
 
 /**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
  */
 enum perf_event_state {
        PERF_EVENT_STATE_DEAD           = -4,
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 
index 9206a4fef9ac151905a825700c6ae7477d7cbd88..cb8d84090cfb7adb478d156727279aa48686d816 100644 (file)
@@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 int of_genpd_parse_idle_states(struct device_node *dn,
                               struct genpd_power_state **states, int *n);
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                               struct device_node *opp_node);
+                               struct device_node *np);
 
 int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
 
 static inline unsigned int
 of_genpd_opp_to_performance_state(struct device *dev,
-                                 struct device_node *opp_node)
+                                 struct device_node *np)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline int genpd_dev_pm_attach(struct device *dev)
index fdf86b4cbc71bacca2795107532fb75e3855c0c9..7e0fdcf905d2e77b355c94a7381446927452723c 100644 (file)
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
        pt->_key   = ~(__poll_t)0; /* all events enabled */
 }
 
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
 {
-       return file->f_op->get_poll_head && file->f_op->poll_mask;
+       return file->f_op->poll;
 }
 
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
 {
-       return file->f_op->poll || file_has_poll_mask(file);
+       if (unlikely(!file->f_op->poll))
+               return DEFAULT_POLLMASK;
+       return file->f_op->poll(file, pt);
 }
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
 struct poll_table_entry {
        struct file *filp;
        __poll_t key;
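
With the ->get_poll_head/->poll_mask pair removed, vfs_poll() above is again the single readiness entry point: it calls ->poll when present and otherwise reports DEFAULT_POLLMASK. A hedged sketch of a query-only caller; the wrapper function itself is invented for illustration:

	/* Illustrative: one-shot readiness probe, no waitqueue registration. */
	static bool file_is_readable(struct file *file)
	{
		__poll_t mask;

		if (!file_can_poll(file))
			return true;	/* vfs_poll() would report DEFAULT_POLLMASK */

		mask = vfs_poll(file, NULL);	/* NULL table: query, don't wait */
		return mask & (EPOLLIN | EPOLLRDNORM);
	}
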
index c85704fcdbd2189b517f407ad871b02055924df5..ee7e987ea1b4354ef93a149c997caf75b6269c1b 100644 (file)
@@ -95,8 +95,8 @@ struct k_itimer {
        clockid_t               it_clock;
        timer_t                 it_id;
        int                     it_active;
-       int                     it_overrun;
-       int                     it_overrun_last;
+       s64                     it_overrun;
+       s64                     it_overrun_last;
        int                     it_requeue_pending;
        int                     it_sigev_notify;
        ktime_t                 it_interval;
index 0174883a935a25ddef6b33ad4765c65f0f3b25c4..1a941efcaa6223a55a8bfcceeb7fb798bbbb1bb4 100644 (file)
@@ -6,6 +6,7 @@
 #include <asm/pti.h>
 #else
 static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
 #endif
 
 #endif
index 36df6ccbc874b6655fa1ac64ebdcaff150d26215..4786c2235b98124919afda4c7203d59c10200702 100644 (file)
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @member:    the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
  */
 #define list_for_each_entry_continue_rcu(pos, head, member)            \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  *
  * Iterate over the tail of a list starting from a given position,
  * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
  */
 #define list_for_each_entry_from_rcu(pos, head, member)                        \
        for (; &(pos)->member != (head);                                        \
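
The expanded comments above hinge on one difference: _continue_rcu resumes one element past the given position, _from_rcu resumes at it. A small sketch under the stated precondition (the starting node was obtained inside the same rcu_read_lock() section, or is otherwise kept alive and on the list); struct item and its fields are invented:

	struct item {
		struct list_head link;
		int key;
	};

	/* Illustrative: finish a walk from a node found earlier in this
	 * same RCU read-side critical section; visits 'pos' itself first. */
	static void walk_tail_from(struct list_head *head, struct item *pos)
	{
		list_for_each_entry_from_rcu(pos, head, link)
			pr_debug("key=%d\n", pos->key);
		/* list_for_each_entry_continue_rcu() here would instead
		 * start at the element after 'pos'. */
	}
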
index 65163aa0bb04ffa7e0b147be32d70965b84589d7..75e5b393cf4408bca6c7dad1a55ff5f585015570 100644 (file)
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
        } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
        do { \
                if (READ_ONCE((t)->rcu_tasks_holdout)) \
                        WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
        do { \
                rcu_all_qs(); \
-               rcu_note_voluntary_context_switch_lite(t); \
+               rcu_tasks_qs(t); \
        } while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)      do { } while (0)
+#define rcu_tasks_qs(t)        do { } while (0)
 #define rcu_note_voluntary_context_switch(t)           rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-       if (!cond_resched()) \
-               rcu_note_voluntary_context_switch_lite(current); \
+       rcu_tasks_qs(current); \
+       cond_resched(); \
 } while (0)
 
 /*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * This is simply an identity function, but it documents where a pointer
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
- * kill_dependency().  It could be used as follows:
- * ``
+ * kill_dependency().  It could be used as follows::
+ *
  *     rcu_read_lock();
  *     p = rcu_dereference(gp);
  *     long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
  *                     p = rcu_pointer_handoff(p);
  *     }
  *     rcu_read_unlock();
- *``
  */
 #define rcu_pointer_handoff(p) (p)
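
The reworked cond_resched_tasks_rcu_qs() above now records the RCU-tasks quiescent state unconditionally and only then calls cond_resched(), rather than reporting it only when cond_resched() did nothing. A sketch of its usual home, a long-running kernel thread loop; the work helper is hypothetical:

	/* Illustrative kthread loop that must not stall RCU-tasks grace
	 * periods while it churns. */
	static int churn_fn(void *arg)
	{
		while (!kthread_should_stop()) {
			do_one_unit(arg);		/* hypothetical helper */
			cond_resched_tasks_rcu_qs();	/* QS, then maybe resched */
		}
		return 0;
	}
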
 
index 7b3c82e8a625dcdb421bd0f2ac97316e3fd1d4be..8d9a0ea8f0b5be65dd78e9c662204dc112b9388f 100644 (file)
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
        do { \
                rcu_sched_qs(); \
-               rcu_note_voluntary_context_switch_lite(current); \
+               rcu_tasks_qs(current); \
        } while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
index 4193c41e383a897273605aac39f331b46512691a..e28cce21bad6cc1692b9624ccab74b93e448fd3e 100644 (file)
@@ -3,9 +3,10 @@
 #define _LINUX_REFCOUNT_H
 
 #include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
 
 /**
  * struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
        return atomic_read(&r->refs);
 }
 
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
 #ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
 
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero  refcount_add_not_zero_checked
+#define refcount_add           refcount_add_checked
+
+#define refcount_inc_not_zero  refcount_inc_not_zero_checked
+#define refcount_inc           refcount_inc_checked
 
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_sub_and_test  refcount_sub_and_test_checked
+
+#define refcount_dec_and_test  refcount_dec_and_test_checked
+#define refcount_dec           refcount_dec_checked
 
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
 #else
 # ifdef CONFIG_ARCH_HAS_REFCOUNT
 #  include <asm/refcount.h>
@@ -98,5 +112,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+                                                      spinlock_t *lock,
+                                                      unsigned long *flags);
 #endif /* _LINUX_REFCOUNT_H */
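
After this change the _checked implementations are always built, and CONFIG_REFCOUNT_FULL merely maps the generic names onto them, so callers keep using the generic API unchanged. A sketch of that caller-side view; struct session is invented:

	struct session {
		refcount_t refs;
		/* ... payload ... */
	};

	/* Illustrative: these names resolve to the _checked variants under
	 * CONFIG_REFCOUNT_FULL, and to arch/atomic fallbacks otherwise. */
	static struct session *session_get(struct session *s)
	{
		return refcount_inc_not_zero(&s->refs) ? s : NULL;
	}

	static void session_put(struct session *s)
	{
		if (refcount_dec_and_test(&s->refs))
			kfree(s);
	}
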
index b72ebdff0b77619ceba8d05a72c5db1f66772f8d..003d09ab308d99681a56e01b0f3bd7f3c8cce4a0 100644 (file)
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
index 64125443f8a638e787adfbaebab4755f5d63852a..5ef5c7c412a75b5f24e276563d278be8ced3b212 100644 (file)
@@ -354,6 +354,8 @@ struct rmi_driver_data {
        struct mutex irq_mutex;
        struct input_dev *input;
 
+       struct irq_domain *irqdomain;
+
        u8 pdt_props;
 
        u8 num_rx_electrodes;
index 1b92a28dd672ba99c8d89ba4e042f64776b2ad6f..6fd615a0eea94278fa1d6d9a1ae711f4b60e347a 100644 (file)
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
 extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *timeout);
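
With CONFIG_DEBUG_LOCK_ALLOC, rt_mutex_lock() above becomes subclass 0 of rt_mutex_lock_nested(), mirroring the regular mutex API. A sketch of the case the subclass argument exists for, taking two locks of the same lockdep class; the function and its locks are invented:

	/* Illustrative: nest two rt_mutexes of one lock class without a
	 * false-positive lockdep report. */
	static void transfer(struct rt_mutex *src, struct rt_mutex *dst)
	{
		rt_mutex_lock(src);				/* subclass 0 */
		rt_mutex_lock_nested(dst, SINGLE_DEPTH_NESTING);
		/* ... move state from src to dst ... */
		rt_mutex_unlock(dst);
		rt_mutex_unlock(src);
	}
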
index 51f52020ad5fdd44ab4fdfa6ad2e0063c4780947..093aa57120b0cf1f40c2a75f28612331c6e6f6e0 100644 (file)
@@ -9,9 +9,6 @@
 #include <asm/io.h>
 
 struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long   sg_magic;
-#endif
        unsigned long   page_link;
        unsigned int    offset;
        unsigned int    length;
@@ -64,7 +61,6 @@ struct sg_table {
  *
  */
 
-#define SG_MAGIC       0x87654321
 #define SG_CHAIN       0x01UL
 #define SG_END         0x02UL
 
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~SG_END;
 }
 
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
 static inline void sg_init_marker(struct scatterlist *sgl,
                                  unsigned int nents)
 {
-#ifdef CONFIG_DEBUG_SG
-       unsigned int i;
-
-       for (i = 0; i < nents; i++)
-               sgl[i].sg_magic = SG_MAGIC;
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 87bf02d93a279a9b98df452c7ad78a0b54adc1db..dac5086e381520855b4666afee144a0343c6a416 100644 (file)
@@ -118,7 +118,7 @@ struct task_group;
  * the comment with set_special_state().
  */
 #define is_special_task_state(state)                           \
-       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
 #define __set_current_state(state_value)                       \
        do {                                                    \
@@ -167,8 +167,8 @@ struct task_group;
  *   need_sleep = false;
  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
  *
  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -1017,7 +1017,6 @@ struct task_struct {
        u64                             last_sum_exec_runtime;
        struct callback_head            numa_work;
 
-       struct list_head                numa_entry;
        struct numa_group               *numa_group;
 
        /*
@@ -1799,20 +1798,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
                set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
        if (current->rseq)
-               __rseq_handle_notify_resume(regs);
+               __rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
        preempt_disable();
        __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        preempt_enable();
-       rseq_handle_notify_resume(regs);
+       rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1832,7 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
@@ -1847,7 +1846,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
                t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
-               rseq_preempt(t);
        }
 }
 
@@ -1864,10 +1862,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
index 1c1a1512ec553fdbda5e7651d349737495411e2c..913488d828cb609e15d3bb4f379b49f60f62a659 100644 (file)
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
 #ifdef CONFIG_SCHED_DEBUG
 extern __read_mostly unsigned int sysctl_sched_migration_cost;
 extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *length,
index 5be31eb7b26647ceb8d9f34298db36ebe6b4da20..108ede99e5335033526f754ba640f8fd0205f457 100644 (file)
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
 
 extern void free_task(struct task_struct *tsk);
 
index 411b52e424e1b2178b961bbec2081c3a0e58affd..abe28d5cb3f4e8096f59778e6c2edb564c3bed71 100644 (file)
@@ -9,17 +9,16 @@
 #define LINUX_SCHED_CLOCK
 
 #ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+extern void generic_sched_clock_init(void);
 
 extern void sched_clock_register(u64 (*read)(void), int bits,
                                 unsigned long rate);
 #else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
 
 static inline void sched_clock_register(u64 (*read)(void), int bits,
                                        unsigned long rate)
 {
-       ;
 }
 #endif
 
index 63030c85ee1927ae0de30a07148fa511e19231aa..88d30fc975e74de87422011d2554ce74298fec94 100644 (file)
@@ -309,7 +309,7 @@ void security_file_set_fowner(struct file *file);
 int security_file_send_sigiotask(struct task_struct *tsk,
                                 struct fown_struct *fown, int sig);
 int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
+int security_file_open(struct file *file);
 int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
 void security_task_free(struct task_struct *task);
 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -858,8 +858,7 @@ static inline int security_file_receive(struct file *file)
        return 0;
 }
 
-static inline int security_file_open(struct file *file,
-                                    const struct cred *cred)
+static inline int security_file_open(struct file *file)
 {
        return 0;
 }
index c8688595499421d9f051366d4a85e5553751768e..610a201126ee031166798baaf8ecae74fe478c4d 100644 (file)
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
+ *     @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -735,7 +736,7 @@ struct sk_buff {
                                peeked:1,
                                head_frag:1,
                                xmit_more:1,
-                               __unused:1; /* one bit hole */
+                               pfmemalloc:1;
 
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
@@ -754,31 +755,30 @@ struct sk_buff {
 
        __u8                    __pkt_type_offset[0];
        __u8                    pkt_type:3;
-       __u8                    pfmemalloc:1;
        __u8                    ignore_df:1;
-
        __u8                    nf_trace:1;
        __u8                    ip_summed:2;
        __u8                    ooo_okay:1;
+
        __u8                    l4_hash:1;
        __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
-
        __u8                    no_fcs:1;
        /* Indicates the inner headers are valid in the skbuff. */
        __u8                    encapsulation:1;
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
+
        __u8                    csum_complete_sw:1;
        __u8                    csum_level:2;
        __u8                    csum_not_inet:1;
-
        __u8                    dst_pending_confirm:1;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
        __u8                    ipvs_property:1;
+
        __u8                    inner_protocol_type:1;
        __u8                    remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
@@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
index 09fa2c6f0e68e69567b8b918cdda3f13f5ddaefa..3a1a1dbc6f49479f61f4c1a6588e6a672f0b0663 100644 (file)
@@ -155,8 +155,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SYSFS
 #define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
 #else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
 static inline void sysfs_slab_release(struct kmem_cache *s)
 {
 }
index c174844cf663eaf7df5ab1f3d856919d557aac4b..d0884b52500100b92ae4829e070ab3a9993af4b3 100644 (file)
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
  *                     parked (cpu offline)
  * @unpark:            Optional unpark function, called when the thread is
  *                     unparked (cpu online)
- * @cpumask:           Internal state.  To update which threads are unparked,
- *                     call smpboot_update_cpumask_percpu_thread().
  * @selfparking:       Thread is not parked by the park function.
  * @thread_comm:       The base name of the thread
  */
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
        void                            (*cleanup)(unsigned int cpu, bool online);
        void                            (*park)(unsigned int cpu);
        void                            (*unpark)(unsigned int cpu);
-       cpumask_var_t                   cpumask;
        bool                            selfparking;
        const char                      *thread_comm;
 };
 
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
-                                          const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
-       return smpboot_register_percpu_thread_cpumask(plug_thread,
-                                                     cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                         const struct cpumask *);
 
 #endif
index 1e8a46435838456ae57af232664ce9686a4ac5c0..3190997df9cac8dd931c904cc112442a172d5888 100644 (file)
@@ -114,29 +114,48 @@ do {                                                              \
 #endif /*arch_spin_is_contended*/
 
 /*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
  *
- *   - it must guarantee a STORE before the spin_lock() is ordered against a
- *     LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
  *
- *   - it must ensure the critical section is RCsc.
+ *   1) Given the snippet:
  *
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ *       { X = 0;  Y = 0; }
  *
- * CPU0                        CPU1                    CPU2
+ *       CPU0                          CPU1
  *
- *                     for (;;) {
- *                       if (READ_ONCE(X))
- *                         break;
- *                     }
- * X=1
- *                     <sched-out>
- *                                             <sched-in>
- *                                             r = X;
+ *       WRITE_ONCE(X, 1);             WRITE_ONCE(Y, 1);
+ *       spin_lock(S);                 smp_mb();
+ *       smp_mb__after_spinlock();     r1 = READ_ONCE(X);
+ *       r0 = READ_ONCE(Y);
+ *       spin_unlock(S);
  *
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ *      try_to_wake_up().
+ *
+ *   2) Given the snippet:
+ *
+ *  { X = 0;  Y = 0; }
+ *
+ *  CPU0               CPU1                            CPU2
+ *
+ *  spin_lock(S);      spin_lock(S);                   r1 = READ_ONCE(Y);
+ *  WRITE_ONCE(X, 1);  smp_mb__after_spinlock();       smp_rmb();
+ *  spin_unlock(S);    r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
+ *                     WRITE_ONCE(Y, 1);
+ *                     spin_unlock(S);
+ *
+ *      it is forbidden that CPU0's critical section executes before CPU1's
+ *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ *      snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
  *
  * Since most load-store architectures implement ACQUIRE with an smp_mb() after
  * the LL/SC loop, they need no further barriers. Similarly all our TSO
@@ -427,6 +446,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                       unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+               __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                           size_t max_size, unsigned int cpu_mult,
                           gfp_t gfp);
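
atomic_dec_and_lock_irqsave() above extends the classic atomic_dec_and_lock() idiom to contexts that need interrupts disabled: it returns true only for the final decrement, with the lock held and IRQs off. A sketch of the teardown pattern it enables; struct my_obj and its fields are invented:

	struct my_obj {
		atomic_t refs;
		spinlock_t lock;	/* protects 'node' membership */
		struct list_head node;
	};

	/* Illustrative: free on last put, unlinking under the lock with
	 * interrupts disabled across the critical section. */
	static void my_obj_put(struct my_obj *obj)
	{
		unsigned long flags;

		if (!atomic_dec_and_lock_irqsave(&obj->refs, &obj->lock, flags))
			return;			/* not the last reference */
		list_del(&obj->node);
		spin_unlock_irqrestore(&obj->lock, flags);
		kfree(obj);
	}
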
index 91494d7e8e41276def7f4f28768f516ca78fd8cc..3e72a291c40142d45ae4ee9722239ac8ad2896b0 100644 (file)
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
        return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+       int retval;
+
+       retval = __srcu_read_lock(sp);
+       return retval;
+}
+
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
        __srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+       __srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
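
The _notrace read-side variants above exist for code that is itself part of the tracing machinery: they skip the lockdep bookkeeping and are marked notrace so they cannot recurse into a tracer. A hedged sketch; my_srcu and the hook function are hypothetical:

	/* Illustrative tracing-side reader. */
	static notrace void my_trace_hook(void)
	{
		int idx;

		idx = srcu_read_lock_notrace(&my_srcu);
		/* ... dereference SRCU-protected tracer state ... */
		srcu_read_unlock_notrace(&my_srcu, idx);
	}
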
index bf8cb0dee23c01ed150f081ffd95861b8d416803..73e06e9986d4b4a57807a16224eaed0ea6a0f396 100644 (file)
@@ -16,7 +16,7 @@
  * wait-queues, but the semantics are actually completely different, and
  * every single user we have ever had has been buggy (or pointless).
  *
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  * "wake_up()" does, and has led to problems. In other cases, it has
  * been fine, because there's only ever one waiter (kvm), but in that
 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
  *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
  *    sleeper state.
  *
- *  - the exclusive mode; because this requires preserving the list order
- *    and this is hard.
+ *  - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ *    exclusive.
  *
  *  - custom wake callback functions; because you cannot give any guarantees
  *    about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
  *      CPU0 - waker                    CPU1 - waiter
  *
  *                                      for (;;) {
- *      @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
+ *      @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
  *      smp_mb();                         // smp_mb() from set_current_state()
  *      if (swait_active(wq_head))        if (@cond)
  *        wake_up(wq_head);                      break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
        return swait_active(wq);
 }
 
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);
 
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
 
 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
 #define ___swait_event(wq, condition, state, ret, cmd)                 \
 ({                                                                     \
+       __label__ __out;                                                \
        struct swait_queue __wait;                                      \
        long __ret = ret;                                               \
                                                                        \
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
-                       break;                                          \
+                       goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_swait(&wq, &__wait);                                     \
-       __ret;                                                          \
+__out: __ret;                                                          \
 })
 
 #define __swait_event(wq, condition)                                   \
        (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,    \
                            schedule())
 
-#define swait_event(wq, condition)                                     \
+#define swait_event_exclusive(wq, condition)                           \
 do {                                                                   \
        if (condition)                                                  \
                break;                                                  \
@@ -208,7 +208,7 @@ do {                                                                        \
                      TASK_UNINTERRUPTIBLE, timeout,                    \
                      __ret = schedule_timeout(__ret))
 
-#define swait_event_timeout(wq, condition, timeout)                    \
+#define swait_event_timeout_exclusive(wq, condition, timeout)          \
 ({                                                                     \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
@@ -220,7 +220,7 @@ do {                                                                        \
        ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,            \
                      schedule())
 
-#define swait_event_interruptible(wq, condition)                       \
+#define swait_event_interruptible_exclusive(wq, condition)             \
 ({                                                                     \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
@@ -233,7 +233,7 @@ do {                                                                        \
                      TASK_INTERRUPTIBLE, timeout,                      \
                      __ret = schedule_timeout(__ret))
 
-#define swait_event_interruptible_timeout(wq, condition, timeout)      \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({                                                                     \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
@@ -246,7 +246,7 @@ do {                                                                        \
        (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
 
 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -257,7 +257,7 @@ do {                                                                        \
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)                                        \
+#define swait_event_idle_exclusive(wq, condition)                      \
 do {                                                                   \
        if (condition)                                                  \
                break;                                                  \
@@ -270,7 +270,7 @@ do {                                                                        \
                       __ret = schedule_timeout(__ret))
 
 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do {                                                                        \
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)               \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)     \
 ({                                                                     \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
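
The _exclusive renames above make the semantics explicit: every swait sleeper is exclusive, so swake_up_one() wakes exactly one of them. A sketch of the basic handoff; the queue, flag, and both functions are invented:

	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
	static bool my_ready;

	/* Illustrative consumer: sleeps as an exclusive waiter. */
	static void consumer(void)
	{
		swait_event_exclusive(my_wq, READ_ONCE(my_ready));
		/* ... */
	}

	/* Illustrative producer: wakes a single waiter per event. */
	static void producer(void)
	{
		WRITE_ONCE(my_ready, true);
		swake_up_one(&my_wq);
	}
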
index 73810808cdf266e5cdcfc1e0c6b3af126a0bf4b1..ebb2f24027e8bd86dd1ac1e9f04a023bcf2838be 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _LINUX_SYSCALLS_H
 #define _LINUX_SYSCALLS_H
 
+struct __aio_sigset;
 struct epoll_event;
 struct iattr;
 struct inode;
@@ -231,6 +232,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
  */
 #ifndef __SYSCALL_DEFINEx
 #define __SYSCALL_DEFINEx(x, name, ...)                                        \
+       __diag_push();                                                  \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                      \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
                __attribute__((alias(__stringify(__se_sys##name))));    \
        ALLOW_ERROR_INJECTION(sys##name, ERRNO);                        \
@@ -243,6 +247,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
                __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
                return ret;                                             \
        }                                                               \
+       __diag_pop();                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
@@ -501,9 +506,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 /* fs/timerfd.c */
 asmlinkage long sys_timerfd_create(int clockid, int flags);
 asmlinkage long sys_timerfd_settime(int ufd, int flags,
-                                   const struct itimerspec __user *utmr,
-                                   struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+                                   const struct __kernel_itimerspec __user *utmr,
+                                   struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
 
 /* fs/utimes.c */
 asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -568,10 +573,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
                                 struct sigevent __user *timer_event_spec,
                                 timer_t __user * created_timer_id);
 asmlinkage long sys_timer_gettime(timer_t timer_id,
-                               struct itimerspec __user *setting);
+                               struct __kernel_itimerspec __user *setting);
 asmlinkage long sys_timer_getoverrun(timer_t timer_id);
 asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
-                               const struct itimerspec __user *new_setting,
+                               const struct __kernel_itimerspec __user *new_setting,
                                struct itimerspec __user *old_setting);
 asmlinkage long sys_timer_delete(timer_t timer_id);
 asmlinkage long sys_clock_settime(clockid_t which_clock,
index aed74463592d8121f2a38c83ad15602aa1af252f..27d83fd2ae6146a615fec51f9fa01660f86fa13d 100644 (file)
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
 int put_timespec64(const struct timespec64 *ts,
                struct __kernel_timespec __user *uts);
 int get_itimerspec64(struct itimerspec64 *it,
-                       const struct itimerspec __user *uit);
+                       const struct __kernel_itimerspec __user *uit);
 int put_itimerspec64(const struct itimerspec64 *it,
-                       struct itimerspec __user *uit);
+                       struct __kernel_itimerspec __user *uit);
 
 extern time64_t mktime64(const unsigned int year, const unsigned int mon,
                        const unsigned int day, const unsigned int hour,
index 0a7b2f79cec7df617525f6745a2109c8a662e7c4..05634afba0db62f0d4bebf743fed5daf7ce2ffe1 100644 (file)
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
  */
 #ifndef CONFIG_64BIT_TIME
 #define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
 #endif
 
 #include <uapi/linux/time.h>
index 86bc2026efcea4fd9af338eb19cb63bcea3a9547..e79861418fd79a144f42dbef65a9eaeb225a9c7a 100644 (file)
@@ -177,7 +177,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
 extern bool timekeeping_rtc_skipsuspend(void);
 extern bool timekeeping_rtc_skipresume(void);
 
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
 
 /*
  * struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +243,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock64(struct timespec64 *ts);
+void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
+                                          struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
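
[Annotation: read_boot_clock64() is replaced by a combined hook above. A minimal sketch of a fallback implementation, assuming a __weak default and a zero boot offset; the in-tree fallback may derive the offset differently.]

void __weak read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
                                                  struct timespec64 *boot_offset)
{
        read_persistent_clock64(wall_clock);    /* RTC wall time, or zeroes */
        *boot_offset = (struct timespec64){ };  /* no sleep offset recorded */
}
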
index 66272862070b1c7610e8f8587ccb38fa627076a4..61dfd93b6ee4d6f47f2609319e01c6192620b3fb 100644 (file)
@@ -64,6 +64,8 @@ struct torture_random_state {
        long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+       DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
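
[Annotation: a hedged sketch of what DEFINE_TORTURE_RANDOM_PERCPU() enables; the variable and function names are illustrative only.]

DEFINE_TORTURE_RANDOM_PERCPU(my_torture_rand);

static bool my_torture_should_delay(void)
{
        struct torture_random_state *trsp = get_cpu_ptr(&my_torture_rand);
        bool ret = !(torture_random(trsp) % 100);       /* roughly 1% of calls */

        put_cpu_ptr(&my_torture_rand);
        return ret;
}
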
index 6c5f2074e14f36d1368e1723394d4da9ef0cf3ae..6f8b68cd460f8c2b0aff758848a5de5a3ad65d6c 100644 (file)
@@ -75,7 +75,7 @@ struct uio_device {
         struct fasync_struct    *async_queue;
         wait_queue_head_t       wait;
         struct uio_info         *info;
-       spinlock_t              info_lock;
+       struct mutex            info_lock;
         struct kobject          *map_dir;
         struct kobject          *portio_dir;
 };
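
[Annotation: with info_lock now a mutex, its holders may sleep. A hedged sketch of the open-path pattern this presumably supports; the function name is hypothetical.]

static int my_uio_open(struct uio_device *idev)
{
        int ret = 0;

        mutex_lock(&idev->info_lock);   /* may sleep, unlike the old spinlock */
        if (!idev->info)
                ret = -EINVAL;          /* raced with unregistration */
        mutex_unlock(&idev->info_lock);
        return ret;
}
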
index 9324ac2d9ff2db234cd5f3b8bf9f7e1a2bc60e52..43913ae79f644dd9a8dfaea06e81df9d9005d520 100644 (file)
@@ -64,7 +64,8 @@ struct vsock_sock {
        struct list_head pending_links;
        struct list_head accept_queue;
        bool rejected;
-       struct delayed_work dwork;
+       struct delayed_work connect_work;
+       struct delayed_work pending_work;
        struct delayed_work close_work;
        bool close_work_scheduled;
        u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
 
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
index 53ce8176c31306deaf9c2be5743546abe4d27b53..ec9d6bc658559c55b64ac3c1d23b4e1166cc4b04 100644 (file)
@@ -271,7 +271,7 @@ int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                     int flags);
 int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
index 5fbfe61f41c67f19713bf0e307ae0612428d68a6..1beb3ead038561d84c618757919871393f8c80c4 100644 (file)
@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 /**
  * cfg80211_rx_control_port - notification about a received control port frame
  * @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skb with the control port frame.  It is assumed that the skb
+ *     is 802.3 formatted (with 802.3 header).  The skb can be non-linear.
+ *     This function does not take ownership of the skb, so the caller is
+ *     responsible for any cleanup.  The caller must also ensure that
+ *     skb->protocol is set appropriately.
  * @unencrypted: Whether the frame was received unencrypted
  *
  * This function is used to inform userspace about a received control port
@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  * Return: %true if the frame was passed to userspace
  */
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted);
+                             struct sk_buff *skb, bool unencrypted);
 
 /**
  * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
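
[Annotation: a hypothetical caller fragment following the updated kerneldoc above; dev, skb and unencrypted come from the new prototype, and the drop-counter bump is illustrative accounting only.]

        skb->protocol = htons(ETH_P_PAE);       /* caller must set this (802.3 frame) */
        if (!cfg80211_rx_control_port(dev, skb, unencrypted))
                dev->stats.rx_dropped++;        /* not passed to userspace */
        dev_kfree_skb(skb);                     /* the function did not consume it */
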
index 5cba71d2dc44b9ea2366725ff68c9f668f639345..3d4930528db0d6f8bcdeaa7c141e2a800cbf0118 100644 (file)
@@ -170,6 +170,7 @@ struct fib6_info {
                                        unused:3;
 
        struct fib6_nh                  fib6_nh;
+       struct rcu_head                 rcu;
 };
 
 struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
 }
 
 struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
 
 static inline void fib6_info_hold(struct fib6_info *f6i)
 {
        atomic_inc(&f6i->fib6_ref);
 }
 
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+       return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
        if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
-               fib6_info_destroy(f6i);
+               call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
 }
 
 enum fib6_walk_state {
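
[Annotation: a hedged sketch of the reader-side pattern the RCU conversion enables; the fn->leaf lookup is illustrative. A reader may take a reference only while the count is still non-zero, and the final release now defers freeing past a grace period.]

static struct fib6_info *my_route_get(struct fib6_node *fn)
{
        struct fib6_info *f6i;

        rcu_read_lock();
        f6i = rcu_dereference(fn->leaf);
        if (f6i && !fib6_info_hold_safe(f6i))
                f6i = NULL;             /* refcount already dropped to zero */
        rcu_read_unlock();
        return f6i;                     /* balanced by fib6_info_release() */
}
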
index 59656fc580df7e0301e0c9282af9358a255b863f..7b9c82de11cc9388b070992af610e5fd14b66333 100644 (file)
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
                (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+       return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+              RTF_GATEWAY;
+}
+
 void ip6_route_input(struct sk_buff *skb);
 struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
index 16475c269749a72f3c487e102e50cabff797317e..8f73be4945037c6d0997ec8ab7c3e9da3980a6e4 100644 (file)
@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
 struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
-                                         struct ipv6_opt_hdr __user *newopt,
-                                         int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
-                       struct ipv6_txoptions *opt,
-                       int newtype,
-                       struct ipv6_opt_hdr *newopt,
-                       int newoptlen);
+                                         struct ipv6_opt_hdr *newopt);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);
 
@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
         * to minimize possbility that any useful information to an
         * attacker is leaked. Only lower 20 bits are relevant.
         */
-       rol32(hash, 16);
+       hash = rol32(hash, 16);
 
        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);
 
 int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode);
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
 #endif /* _NET_IPV6_H */
index b0eaeb02d46d14ceb87f6e62d4765959c8383a66..f4c21b5a1242baac0415b3dde8fbc30524690ee7 100644 (file)
@@ -153,6 +153,8 @@ struct iucv_sock_list {
        atomic_t          autobind_name;
 };
 
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
index dc35f25eb679d4651b59a20c785005aa0cbb4d0f..890a87318014d528a78a3a9631ebdb75bf037a4b 100644 (file)
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
        refcount_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+       return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
index 47e35cce3b648d696b127ed7bd643036128795f6..a71264d75d7f98d28f92dfd861ffe6e0d39c0198 100644 (file)
@@ -128,6 +128,7 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
+       struct ctl_table_header *nf_frag_frags_hdr;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
index 08c005ce56e9ce3642804333062f0fc24006f02c..dc417ef0a0c5092208226cab3ab5e79ee283a18e 100644 (file)
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
  *     @family: protocol family
+ *     @level: depth of the chains
  *     @report: notify via unicast netlink message
  */
 struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
        u32                             portid;
        u32                             seq;
        u8                              family;
+       u8                              level;
        bool                            report;
 };
 
@@ -865,7 +867,6 @@ enum nft_chain_flags {
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @level: length of longest path to this chain
  *     @flags: bitmask of enum nft_chain_flags
  *     @name: name of the chain
  */
@@ -878,7 +879,6 @@ struct nft_chain {
        struct nft_table                *table;
        u64                             handle;
        u32                             use;
-       u16                             level;
        u8                              flags:6,
                                        genmask:2;
        char                            *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
        u32                             genmask:2,
                                        use:30;
        u64                             handle;
-       char                            *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
        /* runtime data below here */
        struct nf_hook_ops              *ops ____cacheline_aligned;
        struct nf_flowtable             data;
index e0c0c2558ec48adfb27629c2180f9b04efb67bcf..a05134507e7bc806d9afd9ff7c86b95e5df084eb 100644 (file)
@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
 extern struct static_key_false nft_counters_enabled;
 extern struct static_key_false nft_trace_enabled;
 
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
 #endif /* _NET_NF_TABLES_CORE_H */
index 9754a50ecde9c44162cc60e387d48cb034c6e6d4..4cc64c8446eb94f1c122cf15d4bf74c7e3f2275d 100644 (file)
@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
  * belonging to established connections going through that one.
  */
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                            struct sock *sk);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
index c978a31b0f846210b4c2a369af960d5349b5395a..762ac9931b6251152b6ee0e5780df0f7b073f3e6 100644 (file)
@@ -109,7 +109,6 @@ struct netns_ipv6 {
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-       struct netns_sysctl_ipv6 sysctl;
        struct netns_frags      frags;
 };
 #endif
index a3c1a2c47cd4bfd868004548cdf1ef7a361fa4c6..20b059574e600e64838b0bdecfaf6a76e6629d4a 100644 (file)
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 }
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+       return false;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
        return NULL;
index 30b3e2fe240a88e3396a8b3664fd879c93fd30bf..8c2caa370e0f683ea764bc0d72da6dfa93699673 100644 (file)
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
 void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+               poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
index 9470fd7e4350ea9546b43a504ebe12f6362dda18..32d2454c04793021c0dc87bca7f1802b49c5249b 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum_params {
-       int action;
        u32 update_flags;
        struct rcu_head rcu;
 };
index efef0b4b1b2bddc76095bcd4d02ebaaa3b2beb56..46b8c7f1c8d5273791df55eeb6345807d8812e96 100644 (file)
@@ -18,7 +18,6 @@
 struct tcf_tunnel_key_params {
        struct rcu_head         rcu;
        int                     tcft_action;
-       int                     action;
        struct metadata_dst     *tcft_enc_metadata;
 };
 
index 0448e7c5d2b4062f8ceecb5b38882385a1be7ead..cd3ecda9386a680e009ca261ea535feaa1349d74 100644 (file)
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
 {
@@ -388,7 +389,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
 int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -538,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -827,12 +830,21 @@ struct tcp_skb_cb {
 
 #define TCP_SKB_CB(__skb)      ((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+       TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
 
 #if IS_ENABLED(CONFIG_IPV6)
 /* This is the variant of inet6_iif() that must be used by TCP,
  * as TCP moves IP6CB into a different location in skb->cb[]
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
+{
+       return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
 {
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 
@@ -907,8 +919,6 @@ enum tcp_ca_event {
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
-       CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
-       CA_EVENT_NON_DELAYED_ACK,
 };
 
 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
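
[Annotation: with CA_EVENT_DELAYED_ACK/CA_EVENT_NON_DELAYED_ACK removed, a congestion module that must acknowledge a specific point can call the newly exported helper directly. A hedged fragment; the saved-sequence parameter is an assumption.]

static void my_ca_ack_prior(struct sock *sk, u32 prior_rcv_nxt)
{
        __tcp_send_ack(sk, prior_rcv_nxt);      /* ACK at an explicit rcv_nxt */
}
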
index 7f84ea3e217cf5e3f78698ee63bc9dced179caed..70c273777fe9fe27b2ef1ba7c2c80970da8ea5c4 100644 (file)
@@ -109,7 +109,8 @@ struct tls_sw_context_rx {
 
        struct strparser strp;
        void (*saved_data_ready)(struct sock *sk);
-       __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+       unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+                               struct poll_table_struct *wait);
        struct sk_buff *recv_pkt;
        u8 control;
        bool decrypted;
@@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                   int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                           struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags);
index b1ea8b0f5e6a8ce82602e593acd583170b4a6e73..81afdacd4fff04bd05335da85a7a06b1996282f8 100644 (file)
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int __udp_disconnect(struct sock *sk, int flags);
 int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6);
index 9fe472f2ac950c8f3f042cca547cb8f7ce820a97..7161856bcf9c7f572943f6a78676df2aa458a5f7 100644 (file)
@@ -60,6 +60,10 @@ struct xdp_sock {
        bool zc;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
+       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+        * in the SKB destructor callback.
+        */
+       spinlock_t tx_completion_lock;
        u64 rx_dropped;
 };
 
index 4c6241bc203931dcc6b74de5be72349e741cb6be..6c003995347a3904cda6e57814c50bcf6c0733a7 100644 (file)
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+       __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
 
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.
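
[Annotation: the wrapper macro above turns every call site into a self-identifying one; a hedged illustration of the expansion.]

        cq = ib_create_cq(ibdev, my_comp, my_event, my_ctx, &attr);
        /* expands to:
         *   cq = __ib_create_cq(ibdev, my_comp, my_event, my_ctx, &attr,
         *                       KBUILD_MODNAME);
         * so the core can attribute the CQ to the creating module.
         */
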
index 5936aac357ab0d8eb4425d4c789ef687805f9258..a8d07feff6a0c0c26b0927448641e1ddaed78d4f 100644 (file)
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
  *     "cpuqs": CPU passes through a quiescent state.
  *     "cpuonl": CPU comes online.
  *     "cpuofl": CPU goes offline.
+ *     "cpuofl-bgp": CPU goes offline while blocking a grace period.
  *     "reqwait": GP kthread sleeps waiting for grace-period request.
  *     "reqwaitsig": GP kthread awakened by signal from reqwait state.
  *     "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
  */
 TRACE_EVENT(rcu_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, gpevent),
+       TP_ARGS(rcuname, gp_seq, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(const char *, gpevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->gpevent = gpevent;
        ),
 
        TP_printk("%s %lu %s",
-                 __entry->rcuname, __entry->gpnum, __entry->gpevent)
+                 __entry->rcuname, __entry->gp_seq, __entry->gpevent)
 );
 
 /*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
  *
  * "Startleaf": Request a grace period based on leaf-node data.
  * "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
  * "Startedroot": Requested a nocb grace period based on root-node data.
  * "NoGPkthread": The RCU grace-period kthread has not yet started.
  * "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
  */
 TRACE_EVENT(rcu_future_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
-                unsigned long c, u8 level, int grplo, int grphi,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
+                unsigned long gp_seq_req, u8 level, int grplo, int grphi,
                 const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+       TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
-               __field(unsigned long, completed)
-               __field(unsigned long, c)
+               __field(unsigned long, gp_seq)
+               __field(unsigned long, gp_seq_req)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
-               __entry->completed = completed;
-               __entry->c = c;
+               __entry->gp_seq = gp_seq;
+               __entry->gp_seq_req = gp_seq_req;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gpevent = gpevent;
        ),
 
-       TP_printk("%s %lu %lu %lu %u %d %d %s",
-                 __entry->rcuname, __entry->gpnum, __entry->completed,
-                 __entry->c, __entry->level, __entry->grplo, __entry->grphi,
-                 __entry->gpevent)
+       TP_printk("%s %lu %lu %u %d %d %s",
+                 __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->gpevent)
 );
 
 /*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
  */
 TRACE_EVENT(rcu_grace_period_init,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
                 int grplo, int grphi, unsigned long qsmask),
 
-       TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+       TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
        ),
 
        TP_printk("%s %lu %u %d %d %lx",
-                 __entry->rcuname, __entry->gpnum, __entry->level,
+                 __entry->rcuname, __entry->gp_seq, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
 );
 
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
  */
 TRACE_EVENT(rcu_preempt_task,
 
-       TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+       TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
 
-       TP_ARGS(rcuname, pid, gpnum),
+       TP_ARGS(rcuname, pid, gp_seq),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
        TP_printk("%s %lu %d",
-                 __entry->rcuname, __entry->gpnum, __entry->pid)
+                 __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
  */
 TRACE_EVENT(rcu_unlock_preempted_task,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
 
-       TP_ARGS(rcuname, gpnum, pid),
+       TP_ARGS(rcuname, gp_seq, pid),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
-       TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+       TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
  */
 TRACE_EVENT(rcu_quiescent_state_report,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
                 unsigned long mask, unsigned long qsmask,
                 u8 level, int grplo, int grphi, int gp_tasks),
 
-       TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+       TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(unsigned long, mask)
                __field(unsigned long, qsmask)
                __field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->mask = mask;
                __entry->qsmask = qsmask;
                __entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
        ),
 
        TP_printk("%s %lu %lx>%lx %u %d %d %u",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->mask, __entry->qsmask, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->gp_tasks)
 );
 
 /*
  * Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
  */
 TRACE_EVENT(rcu_fqs,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
 
-       TP_ARGS(rcuname, gpnum, cpu, qsevent),
+       TP_ARGS(rcuname, gp_seq, cpu, qsevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, cpu)
                __field(const char *, qsevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->cpu = cpu;
                __entry->qsevent = qsevent;
        ),
 
        TP_printk("%s %lu %d %s",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->cpu, __entry->qsevent)
 );
 
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
 
 #else /* #ifdef CONFIG_RCU_TRACE */
 
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
                                      level, grplo, grphi, event) \
                                      do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
                                    qsmask) do { } while (0)
 #define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
        do { } while (0)
 #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
        do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
                                         grplo, grphi, gp_tasks) do { } \
        while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
 #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
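
[Annotation: the gpnum/completed pair collapses into one gp_seq counter whose low bits carry grace-period state. A sketch of the accessors this implies, quoting the kernel/rcu/rcu.h convention from memory; treat the constants as assumptions.]

#define RCU_SEQ_CTR_SHIFT      2
#define RCU_SEQ_STATE_MASK     ((1 << RCU_SEQ_CTR_SHIFT) - 1)

static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;          /* completed grace periods */
}

static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;          /* nonzero: GP in progress */
}
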
index d00221345c1988ff59de79f47401903d560c55e0..d4593a6062ef00d436bc237c9209082cba62805d 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <linux/types.h>
 #include <linux/fs.h>
-#include <linux/signal.h>
 #include <asm/byteorder.h>
 
 typedef __kernel_ulong_t aio_context_t;
@@ -39,8 +38,10 @@ enum {
        IOCB_CMD_PWRITE = 1,
        IOCB_CMD_FSYNC = 2,
        IOCB_CMD_FDSYNC = 3,
-       /* 4 was the experimental IOCB_CMD_PREADX */
-       IOCB_CMD_POLL = 5,
+       /* These two are experimental.
+        * IOCB_CMD_PREADX = 4,
+        * IOCB_CMD_POLL = 5,
+        */
        IOCB_CMD_NOOP = 6,
        IOCB_CMD_PREADV = 7,
        IOCB_CMD_PWRITEV = 8,
@@ -108,10 +109,5 @@ struct iocb {
 #undef IFBIG
 #undef IFLITTLE
 
-struct __aio_sigset {
-       const sigset_t __user   *sigmask;
-       size_t          sigsetsize;
-};
-
 #endif /* __LINUX__AIO_ABI_H */
 
index 59b19b6a40d73ea6575f8810a6f4345a931c5a01..b7db3261c62d124760e98d9c851c1b01e64bdb03 100644 (file)
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
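
[Annotation: a hypothetical XDP fragment acting on the new return convention; ctx is the XDP context and eth is assumed to be the already-validated Ethernet header.]

        struct bpf_fib_lookup params = {};
        int rc;

        /* family, addresses, tot_len and ifindex filled from the packet */
        rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
        if (rc < 0)
                return XDP_ABORTED;             /* invalid arguments */
        if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
                __builtin_memcpy(eth->h_dest, params.dmac, ETH_ALEN);
                __builtin_memcpy(eth->h_source, params.smac, ETH_ALEN);
                return bpf_redirect(params.ifindex, 0); /* egress from lookup */
        }
        return XDP_PASS;                        /* codes > 0: defer to the stack */
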
index 0b5ddbe135a47aa7f39b40ca44e665e5757014de..972265f328717b8286edc2fc93c9d2a54ed394f8 100644 (file)
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)    (((VAL  & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)      ((VAL)  & 0x0000ffff)
+#define BTF_INT_BITS(VAL)      ((VAL)  & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED (1 << 0)
index 4e12c423b9fe9352df063f14c447858cf6e774e9..c5358e0ae7c5e1f7fdbb358fa4b666a5b38b7a81 100644 (file)
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
 #define NT_ARM_SVE     0x405           /* ARM Scalable Vector Extension registers */
 #define NT_ARC_V2      0x600           /* ARCv2 accumulator/extra registers */
 #define NT_VMCOREDD    0x700           /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP    0x800           /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE        0x801           /* MIPS floating-point mode */
 
 /* Note header in a PT_NOTE section */
 typedef struct elf32_note {
index 4ca65b56084f94526435a58a8663d58054c924f4..7363f18e65a553e12f4d1cc13844dfbf2bbe6f17 100644 (file)
@@ -226,7 +226,7 @@ enum tunable_id {
        ETHTOOL_TX_COPYBREAK,
        ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
        /*
-        * Add your fresh new tubale attribute above and remember to update
+        * Add your fresh new tunable attribute above and remember to update
         * tunable_strings[] in net/core/ethtool.c
         */
        __ETHTOOL_TUNABLE_COUNT,
index b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96..b955b986b3413ac2fd2a7262b6bbee29006a65aa 100644 (file)
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
 #define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 85a3fb65e40a6f3941337c7fad17e7fdff3b33d0..20d6cc91435df90f08741c478ab29ea85efa7167 100644 (file)
@@ -53,6 +53,9 @@ enum {
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+                                               *  close by last opener.
+                                               */
 
 /* userspace doesn't need the nbd_device structure */
 
index b8e288a1f7409012d50e464e7993b96d4c404610..eeb787b1c53c72771c8d684154b7a87dc029a45b 100644 (file)
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
+
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
 };
 
 /*
index d620fa43756cab2685428861f31c27d9a59b2a39..9a402fdb60e97bc92591312ebc7071de54fa900c 100644 (file)
  * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
 
 enum rseq_cpu_id_state {
        RSEQ_CPU_ID_UNINITIALIZED               = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
        __u32 version;
        /* enum rseq_cs_flags */
        __u32 flags;
-       LINUX_FIELD_u32_u64(start_ip);
+       __u64 start_ip;
        /* Offset from start_ip. */
-       LINUX_FIELD_u32_u64(post_commit_offset);
-       LINUX_FIELD_u32_u64(abort_ip);
+       __u64 post_commit_offset;
+       __u64 abort_ip;
 } __attribute__((aligned(4 * sizeof(__u64))));
 
 /*
@@ -67,28 +62,30 @@ struct rseq_cs {
 struct rseq {
        /*
         * Restartable sequences cpu_id_start field. Updated by the
-        * kernel, and read by user-space with single-copy atomicity
-        * semantics. Aligned on 32-bit. Always contains a value in the
-        * range of possible CPUs, although the value may not be the
-        * actual current CPU (e.g. if rseq is not initialized). This
-        * CPU number value should always be compared against the value
-        * of the cpu_id field before performing a rseq commit or
-        * returning a value read from a data structure indexed using
-        * the cpu_id_start value.
+        * kernel. Read by user-space with single-copy atomicity
+        * semantics. This field should only be read by the thread which
+        * registered this data structure. Aligned on 32-bit. Always
+        * contains a value in the range of possible CPUs, although the
+        * value may not be the actual current CPU (e.g. if rseq is not
+        * initialized). This CPU number value should always be compared
+        * against the value of the cpu_id field before performing a rseq
+        * commit or returning a value read from a data structure indexed
+        * using the cpu_id_start value.
         */
        __u32 cpu_id_start;
        /*
-        * Restartable sequences cpu_id field. Updated by the kernel,
-        * and read by user-space with single-copy atomicity semantics.
-        * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
-        * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
-        * former means "rseq uninitialized", and latter means "rseq
-        * initialization failed". This value is meant to be read within
-        * rseq critical sections and compared with the cpu_id_start
-        * value previously read, before performing the commit instruction,
-        * or read and compared with the cpu_id_start value before returning
-        * a value loaded from a data structure indexed using the
-        * cpu_id_start value.
+        * Restartable sequences cpu_id field. Updated by the kernel.
+        * Read by user-space with single-copy atomicity semantics. This
+        * field should only be read by the thread which registered this
+        * data structure. Aligned on 32-bit. Values
+        * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+        * have a special semantic: the former means "rseq uninitialized",
+        * and the latter means "rseq initialization failed". This value is
+        * meant to be read within rseq critical sections and compared
+        * with the cpu_id_start value previously read, before performing
+        * the commit instruction, or read and compared with the
+        * cpu_id_start value before returning a value loaded from a data
+        * structure indexed using the cpu_id_start value.
         */
        __u32 cpu_id;
        /*
@@ -105,27 +102,44 @@ struct rseq {
         * targeted by the rseq_cs. Also needs to be set to NULL by user-space
         * before reclaiming memory that contains the targeted struct rseq_cs.
         *
-        * Read and set by the kernel with single-copy atomicity semantics.
-        * Set by user-space with single-copy atomicity semantics. Aligned
-        * on 64-bit.
+        * Read and set by the kernel. Set by user-space with single-copy
+        * atomicity semantics. This field should only be updated by the
+        * thread which registered this data structure. Aligned on 64-bit.
         */
-       LINUX_FIELD_u32_u64(rseq_cs);
+       union {
+               __u64 ptr64;
+#ifdef __LP64__
+               __u64 ptr;
+#else
+               struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+                       __u32 padding;          /* Initialized to zero. */
+                       __u32 ptr32;
+#else /* LITTLE */
+                       __u32 ptr32;
+                       __u32 padding;          /* Initialized to zero. */
+#endif /* ENDIAN */
+               } ptr;
+#endif
+       } rseq_cs;
+
        /*
-        * - RSEQ_DISABLE flag:
+        * Restartable sequences flags field.
+        *
+        * This field should only be updated by the thread which
+        * registered this data structure. Read by the kernel.
+        * Mainly used for single-stepping through rseq critical sections
+        * with debuggers.
         *
-        * Fallback fast-track flag for single-stepping.
-        * Set by user-space if lack of progress is detected.
-        * Cleared by user-space after rseq finish.
-        * Read by the kernel.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on preemption for this thread.
+        *     Inhibit instruction sequence block restart on preemption
+        *     for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on signal delivery for this thread.
+        *     Inhibit instruction sequence block restart on signal
+        *     delivery for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on migration for this thread.
+        *     Inhibit instruction sequence block restart on migration for
+        *     this thread.
         */
        __u32 flags;
 } __attribute__((aligned(4 * sizeof(__u64))));
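
[Annotation: a hedged userspace sketch of publishing a critical-section descriptor through the new union; the 64-bit branch is shown, while 32-bit callers store via ptr32 with the padding zeroed.]

static void rseq_publish_cs(volatile struct rseq *rs, const struct rseq_cs *cs)
{
        /* Single-copy-atomic store; the kernel reads it on preemption/signal. */
        rs->rseq_cs.ptr64 = (__u64)(unsigned long)cs;
}
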
index 6e299349b15876d3302cc784576dd84cff6f1d66..b7b57967d90f09cd428d90e12b0035e3ecbcfc67 100644 (file)
@@ -44,6 +44,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 
 struct tcmu_mailbox {
        __u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
        __u16 cmd_id;
        __u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN   0x2
        __u8 uflags;
 
 } __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
                        __u8 scsi_status;
                        __u8 __pad1;
                        __u16 __pad2;
-                       __u32 __pad3;
+                       __u32 read_len;
                        char sense_buffer[TCMU_SENSE_BUFFERSIZE];
                } rsp;
        };
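
[Annotation: a hedged userspace-handler fragment for the new capability; variable names are illustrative. Check the mailbox capability once, then report how many bytes a short READ actually produced.]

        if (mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) {
                entry->hdr.uflags |= TCMU_UFLAG_READ_LEN;
                entry->rsp.read_len = bytes_read;       /* may be < requested */
        }
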
index 29eb659aa77a183e36082599866fb512908d1197..e3f6ed8a7064f9276ca2b57ed5ecff3364786e9d 100644 (file)
@@ -127,6 +127,10 @@ enum {
 
 #define TCP_CM_INQ             TCP_INQ
 
+#define TCP_REPAIR_ON          1
+#define TCP_REPAIR_OFF         0
+#define TCP_REPAIR_OFF_NO_WP   -1      /* Turn off without window probes */
+
 struct tcp_repair_opt {
        __u32   opt_code;
        __u32   opt_val;
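
[Annotation: a hedged userspace sketch of the new constant, leaving repair mode without the window probes that plain TCP_REPAIR_OFF would trigger.]

        int val = TCP_REPAIR_OFF_NO_WP;

        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val)) < 0)
                perror("TCP_REPAIR");
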
index fcf9366564936d5026c6e27cc56804debfb8eefb..6b56a2208be7b51032533db95bf26faf3032ee55 100644 (file)
@@ -49,6 +49,13 @@ struct __kernel_timespec {
 };
 #endif
 
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+       struct __kernel_timespec it_interval;    /* timer period */
+       struct __kernel_timespec it_value;       /* timer expiration */
+};
+#endif
+
 /*
  * legacy timeval structure, only embedded in structures that
  * traditionally used 'timeval' to pass time intervals (not absolute
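
[Annotation: the struct added above is the 64-bit-time-safe shape now taken by the timerfd and POSIX-timer prototypes earlier in this diff; a minimal initialization sketch.]

        struct __kernel_itimerspec its = {
                .it_value    = { .tv_sec = 1, .tv_nsec = 0 },   /* first expiry in 1s */
                .it_interval = { .tv_sec = 1, .tv_nsec = 0 },   /* then every second */
        };
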
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644 (file)
index 0a87ace..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field)                    __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)    field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-#  define LINUX_FIELD_u32_u64(field)   __u32 field ## _padding, field
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field ## _padding = 0, field = (intptr_t)v
-# else
-#  define LINUX_FIELD_u32_u64(field)   __u32 field, field ## _padding
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
index 9d4340c907d17d0c2ecacdd6762e36d7c9d6def5..1e1d9bd0bd3788711d8722e7ec9e1a15661d7c3b 100644 (file)
@@ -25,12 +25,16 @@ extern bool xen_pvh;
 #define xen_hvm_domain()       (xen_domain_type == XEN_HVM_DOMAIN)
 #define xen_pvh_domain()       (xen_pvh)
 
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 
 #define xen_initial_domain()   (xen_domain() && \
-                                xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+                                (xen_start_flags & SIF_INITDOMAIN))
 #else  /* !CONFIG_XEN_DOM0 */
 #define xen_initial_domain()   (0)
 #endif /* CONFIG_XEN_DOM0 */
index 5a52f07259a2aab4ad5993801a6d15b5dfe5d4a3..8b1ab81ecda8f8cea83da326ffa9e9d150473fcf 100644 (file)
@@ -125,10 +125,13 @@ config HAVE_KERNEL_LZO
 config HAVE_KERNEL_LZ4
        bool
 
+config HAVE_KERNEL_UNCOMPRESSED
+       bool
+
 choice
        prompt "Kernel compression mode"
        default KERNEL_GZIP
-       depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4
+       depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED
        help
          The linux kernel is a kind of self-extracting executable.
          Several compression algorithms are available, which differ
@@ -207,6 +210,16 @@ config KERNEL_LZ4
          is about 8% bigger than LZO. But the decompression speed is
          faster than LZO.
 
+config KERNEL_UNCOMPRESSED
+       bool "None"
+       depends on HAVE_KERNEL_UNCOMPRESSED
+       help
+         Produce an uncompressed kernel image. This option is usually not what
+         you want. It is useful for debugging the kernel in slow simulation
+         environments, where decompressing and moving the kernel is awfully
+         slow. This option allows early boot code to skip the decompressor
+         and jump right to the uncompressed kernel image.
+
 endchoice
 
 config DEFAULT_HOSTNAME
@@ -1051,10 +1064,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
        help
-         Select this if the architecture wants to do dead code and
-         data elimination with the linker by compiling with
-         -ffunction-sections -fdata-sections, and linking with
-         --gc-sections.
+         Enable this if you want to do dead code and data elimination with
+         the linker by compiling with -ffunction-sections -fdata-sections,
+         and linking with --gc-sections.
 
          This can reduce on disk and in-memory size of the kernel
          code and static data, particularly for small configs and
@@ -1719,10 +1731,6 @@ source "arch/Kconfig"
 
 endmenu                # General setup
 
-config HAVE_GENERIC_DMA_COHERENT
-       bool
-       default n
-
 config RT_MUTEXES
        bool
 
index 3b4ada11ed521a1bb25ead3ca461640ba1026923..38c68b593d0d704f103bec42d37b42ca4742673f 100644 (file)
@@ -79,7 +79,7 @@
 #include <linux/pti.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
-#include <linux/sched_clock.h>
+#include <linux/sched/clock.h>
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/context_tracking.h>
@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_kernel(void)
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
-       boot_cpu_state_init();
        smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+       boot_cpu_hotplug_init();
 
        build_all_zonelists(NULL);
        page_alloc_init();
@@ -642,7 +642,6 @@ asmlinkage __visible void __init start_kernel(void)
        softirq_init();
        timekeeping_init();
        time_init();
-       sched_clock_postinit();
        printk_safe_init();
        perf_event_init();
        profile_init();
@@ -697,6 +696,7 @@ asmlinkage __visible void __init start_kernel(void)
        acpi_early_init();
        if (late_time_init)
                late_time_init();
+       sched_clock_init();
        calibrate_delay();
        pid_idr_init();
        anon_vma_init();
@@ -1065,6 +1065,13 @@ static int __ref kernel_init(void *unused)
        jump_label_invalidate_initmem();
        free_initmem();
        mark_readonly();
+
+       /*
+        * Kernel mappings are now finalized - update the userspace page-table
+        * to finalize PTI.
+        */
+       pti_finalize();
+
        system_state = SYSTEM_RUNNING;
        numa_default_policy();
 
index 5af1943ad782b415a3dd331161e9b2ecccf89210..76e95e4f3aa284f6ded3962b3055233ea533add8 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
        }
 
        do {
-               queue.status = -EINTR;
+               WRITE_ONCE(queue.status, -EINTR);
                queue.sleeper = current;
 
                __set_current_state(TASK_INTERRUPTIBLE);
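
[Annotation: the WRITE_ONCE() pairs with lockless readers of queue.status; a hedged fragment of the matching reader discipline, with the helper name hypothetical.]

static bool my_op_completed(struct sem_queue *q)
{
        int status = READ_ONCE(q->status);      /* pairs with WRITE_ONCE() above */

        return status != -EINTR;                /* writer published a result */
}
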
index 051a3e1fb8df9b2bcb2073e8299d31cdfc938724..d8a38b3be5dd293ede3ecdb097bbc591a0abdda6 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -427,6 +427,17 @@ static int shm_split(struct vm_area_struct *vma, unsigned long addr)
        return 0;
 }
 
+static unsigned long shm_pagesize(struct vm_area_struct *vma)
+{
+       struct file *file = vma->vm_file;
+       struct shm_file_data *sfd = shm_file_data(file);
+
+       if (sfd->vm_ops->pagesize)
+               return sfd->vm_ops->pagesize(vma);
+
+       return PAGE_SIZE;
+}
+
 #ifdef CONFIG_NUMA
 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
@@ -554,6 +565,7 @@ static const struct vm_operations_struct shm_vm_ops = {
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
        .split  = shm_split,
+       .pagesize = shm_pagesize,
 #if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
@@ -1354,15 +1366,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
        struct shmid_kernel *shp;
        unsigned long addr = (unsigned long)shmaddr;
        unsigned long size;
-       struct file *file;
+       struct file *file, *base;
        int    err;
        unsigned long flags = MAP_SHARED;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
-       struct path path;
-       fmode_t f_mode;
+       int f_flags;
        unsigned long populate = 0;
 
        err = -EINVAL;
@@ -1395,11 +1406,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
-               f_mode = FMODE_READ;
+               f_flags = O_RDONLY;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
-               f_mode = FMODE_READ | FMODE_WRITE;
+               f_flags = O_RDWR;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
@@ -1435,46 +1446,44 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
                goto out_unlock;
        }
 
-       path = shp->shm_file->f_path;
-       path_get(&path);
+       /*
+        * We need to take a reference to the real shm file to prevent the
+        * pointer from becoming stale in cases where the lifetime of the outer
+        * file extends beyond that of the shm segment.  It's not usually
+        * possible, but it can happen during remap_file_pages() emulation as
+        * that unmaps the memory, then does ->mmap() via file reference only.
+        * We'll deny the ->mmap() if the shm segment was since removed, but to
+        * detect shm ID reuse we need to compare the file pointers.
+        */
+       base = get_file(shp->shm_file);
        shp->shm_nattch++;
-       size = i_size_read(d_inode(path.dentry));
+       size = i_size_read(file_inode(base));
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
 
        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
-               path_put(&path);
+               fput(base);
                goto out_nattch;
        }
 
-       file = alloc_file(&path, f_mode,
-                         is_file_hugepages(shp->shm_file) ?
+       file = alloc_file_clone(base, f_flags,
+                         is_file_hugepages(base) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
-               path_put(&path);
+               fput(base);
                goto out_nattch;
        }
 
-       file->private_data = sfd;
-       file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
-       /*
-        * We need to take a reference to the real shm file to prevent the
-        * pointer from becoming stale in cases where the lifetime of the outer
-        * file extends beyond that of the shm segment.  It's not usually
-        * possible, but it can happen during remap_file_pages() emulation as
-        * that unmaps the memory, then does ->mmap() via file reference only.
-        * We'll deny the ->mmap() if the shm segment was since removed, but to
-        * detect shm ID reuse we need to compare the file pointers.
-        */
-       sfd->file = get_file(shp->shm_file);
+       sfd->file = base;
        sfd->vm_ops = NULL;
+       file->private_data = sfd;
 
        err = security_mmap_file(file, prot, flags);
        if (err)
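
The do_shmat() rework pins the underlying shm file with get_file() while the segment is still locked, and every error path drops exactly that reference with fput(). Condensed from the hunks above (fops stands in for the hugepage-dependent choice between shm_file_operations_huge and shm_file_operations):

        base = get_file(shp->shm_file);         /* pin under the IPC lock */

        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                fput(base);                     /* undo the pin */
                goto out_nattch;
        }

        file = alloc_file_clone(base, f_flags, fops);
        if (IS_ERR(file)) {
                kfree(sfd);
                fput(base);                     /* undo the pin */
                goto out_nattch;
        }

        sfd->file = base;                       /* reference now owned by sfd */
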
index d2001624fe7a31b788508e5da97924173bf2e33e..04bc07c2b42a9dfef399caea56a12b072f0ad028 100644 (file)
@@ -41,6 +41,7 @@ obj-y += printk/
 obj-y += irq/
 obj-y += rcu/
 obj-y += livepatch/
+obj-y += dma/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
index ceb1c4596c511e9a291d27a1fe710eefde439284..80d672a1108883be0553212757c13912cabe5bec 100644 (file)
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
                break;
        case AUDIT_KERN_MODULE:
                audit_log_format(ab, "name=");
-               audit_log_untrustedstring(ab, context->module.name);
-               kfree(context->module.name);
+               if (context->module.name) {
+                       audit_log_untrustedstring(ab, context->module.name);
+                       kfree(context->module.name);
+               } else
+                       audit_log_format(ab, "(null)");
+
                break;
        }
        audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
 {
        struct audit_context *context = audit_context();
 
-       context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
-       strcpy(context->module.name, name);
+       context->module.name = kstrdup(name, GFP_KERNEL);
+       if (!context->module.name)
+               audit_log_lost("out of memory in __audit_log_kern_module");
        context->type = AUDIT_KERN_MODULE;
 }
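
The kstrdup() conversion above fixes two problems at once: the old kmalloc()+strcpy() pair would write through a NULL pointer on allocation failure, and nothing recorded the lost audit data. A userspace sketch of the same defensive shape, with strdup() standing in for kstrdup():

        #include <stdio.h>
        #include <string.h>

        /* Duplicate-and-check instead of allocate-then-copy: on failure
         * the name stays NULL and the loss is reported; the show path
         * later prints "(null)" rather than dereferencing the pointer.
         */
        static char *log_module_name(const char *name)
        {
                char *copy = strdup(name);  /* kernel: kstrdup(name, GFP_KERNEL) */

                if (!copy)
                        fprintf(stderr, "out of memory logging module name\n");
                return copy;                /* may be NULL; callers must check */
        }
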
 
index 544e58f5f6429ef732ddbe9f167549cc2ecea548..2aa55d030c774b972b5a1d1eaac6c053f4959115 100644 (file)
@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
                return -EINVAL;
 
        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
-       if (!value_type || value_size > map->value_size)
+       if (!value_type || value_size != map->value_size)
                return -EINVAL;
 
        return 0;
index 2d49d18b793abaf60379c4050e4148a77bae732f..2590700237c13cd7a1a7394c8797a0a2aafd063a 100644 (file)
@@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
  */
 static bool btf_type_int_is_regular(const struct btf_type *t)
 {
-       u16 nr_bits, nr_bytes;
+       u8 nr_bits, nr_bytes;
        u32 int_data;
 
        int_data = btf_type_int(t);
@@ -991,38 +991,38 @@ static void btf_int_bits_seq_show(const struct btf *btf,
                                  void *data, u8 bits_offset,
                                  struct seq_file *m)
 {
+       u16 left_shift_bits, right_shift_bits;
        u32 int_data = btf_type_int(t);
-       u16 nr_bits = BTF_INT_BITS(int_data);
-       u16 total_bits_offset;
-       u16 nr_copy_bytes;
-       u16 nr_copy_bits;
-       u8 nr_upper_bits;
-       union {
-               u64 u64_num;
-               u8  u8_nums[8];
-       } print_num;
+       u8 nr_bits = BTF_INT_BITS(int_data);
+       u8 total_bits_offset;
+       u8 nr_copy_bytes;
+       u8 nr_copy_bits;
+       u64 print_num;
 
+       /*
+        * bits_offset is at most 7.
+        * BTF_INT_OFFSET() cannot exceed 64 bits.
+        */
        total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
        data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
        bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
        nr_copy_bits = nr_bits + bits_offset;
        nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
 
-       print_num.u64_num = 0;
-       memcpy(&print_num.u64_num, data, nr_copy_bytes);
-
-       /* Ditch the higher order bits */
-       nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
-       if (nr_upper_bits) {
-               /* We need to mask out some bits of the upper byte. */
-               u8 mask = (1 << nr_upper_bits) - 1;
+       print_num = 0;
+       memcpy(&print_num, data, nr_copy_bytes);
 
-               print_num.u8_nums[nr_copy_bytes - 1] &= mask;
-       }
+#ifdef __BIG_ENDIAN_BITFIELD
+       left_shift_bits = bits_offset;
+#else
+       left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+       right_shift_bits = BITS_PER_U64 - nr_bits;
 
-       print_num.u64_num >>= bits_offset;
+       print_num <<= left_shift_bits;
+       print_num >>= right_shift_bits;
 
-       seq_printf(m, "0x%llx", print_num.u64_num);
+       seq_printf(m, "0x%llx", print_num);
 }
 
 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -1032,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
        u32 int_data = btf_type_int(t);
        u8 encoding = BTF_INT_ENCODING(int_data);
        bool sign = encoding & BTF_INT_SIGNED;
-       u32 nr_bits = BTF_INT_BITS(int_data);
+       u8 nr_bits = BTF_INT_BITS(int_data);
 
        if (bits_offset || BTF_INT_OFFSET(int_data) ||
            BITS_PER_BYTE_MASKED(nr_bits)) {
@@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
 {
        bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
        const struct btf_member *member;
+       u32 meta_needed, last_offset;
        struct btf *btf = env->btf;
        u32 struct_size = t->size;
-       u32 meta_needed;
        u16 i;
 
        meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
 
        btf_verifier_log_type(env, t, NULL);
 
+       last_offset = 0;
        for_each_member(i, t, member) {
                if (!btf_name_offset_valid(btf, member->name_off)) {
                        btf_verifier_log_member(env, t, member,
@@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /*
+                * ">" instead of ">=" because the last member could be
+                * "char a[0];"
+                */
+               if (last_offset > member->offset) {
+                       btf_verifier_log_member(env, t, member,
+                                               "Invalid member bits_offset");
+                       return -EINVAL;
+               }
+
                if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
                        btf_verifier_log_member(env, t, member,
                                                "Memmber bits_offset exceeds its struct size");
@@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                }
 
                btf_verifier_log_member(env, t, member, NULL);
+               last_offset = member->offset;
        }
 
        return meta_needed;
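
The rewritten btf_int_bits_seq_show() replaces the old union-and-mask dance with two shifts: shift the copied quadword left so the field's top bit lands at bit 63, then right so the field comes out right-aligned and zero-extended. A self-contained little-endian demonstration of that arithmetic (names are illustrative):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Assumes nr_bits + bits_offset <= 64, as the kernel comment notes. */
        static uint64_t extract_bits(const void *data, uint8_t bits_offset,
                                     uint8_t nr_bits)
        {
                uint8_t nr_copy_bits  = nr_bits + bits_offset;
                uint8_t nr_copy_bytes = (nr_copy_bits + 7) / 8;
                uint64_t v = 0;

                memcpy(&v, data, nr_copy_bytes);
                v <<= 64 - nr_copy_bits;   /* drop the bits above the field */
                v >>= 64 - nr_bits;        /* drop the bits below, zero-extend */
                return v;
        }

        int main(void)
        {
                uint16_t raw = 0x0ace;     /* field occupies bits 1..9 */

                /* prints 0x167, i.e. (0x0ace >> 1) & 0x1ff */
                printf("0x%llx\n", (unsigned long long)extract_bits(&raw, 1, 9));
                return 0;
        }
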
index f7c00bd6f8e49ca9cc4e6ee323b01f718aebd9ec..3d83ee7df381b1def956b5e645376451d797440e 100644 (file)
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
        return ret;
 }
 
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+                               attr->attach_flags);
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       struct bpf_prog *prog;
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               prog = NULL;
+
+       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+       if (prog)
+               bpf_prog_put(prog);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->query.target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
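
The three helpers added above pull the cgroup_get_from_fd()/cgroup_put() bracket out of syscall.c so every caller follows the same shape. A sketch of the attach-side caller, matching the bpf_prog_attach() rework later in this series (attr, ptype and prog resolved by the syscall plumbing as in those hunks):

        ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        if (ret)
                bpf_prog_put(prog);   /* helper already dropped the cgroup ref */
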
index 9f1493705f4043066033dd44ec6deb95e7418287..1e5625d46414cc68efe372b2c6a8dab266a24dd6 100644 (file)
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+       int i;
+
+       for (i = 0; i < fp->aux->func_cnt; i++)
+               bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+       bpf_prog_kallsyms_del_subprogs(fp);
+       bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return 0;
 }
 
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+       /* In case of BPF to BPF calls, verifier did all the prep
+        * work with regards to JITing, etc.
+        */
+       if (fp->bpf_func)
+               goto finalize;
 
-       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-       fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+       bpf_prog_select_func(fp);
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
                if (*err)
                        return fp;
        }
+
+finalize:
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
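
The factored-out bpf_prog_select_func() keeps the old bucket math: one interpreter per 32-byte stack-size class, selected by round_up(stack_depth, 32) / 32 - 1. A standalone check of that arithmetic:

        #include <stdio.h>

        static unsigned int interp_idx(unsigned int stack_depth)
        {
                if (stack_depth < 1)
                        stack_depth = 1;            /* max_t(u32, depth, 1) */
                return ((stack_depth + 31) / 32) - 1; /* round_up(d, 32)/32 - 1 */
        }

        int main(void)
        {
                /* depths 1..32 -> slot 0, 33..64 -> slot 1, 512 -> slot 15 */
                printf("%u %u %u\n", interp_idx(1), interp_idx(33),
                       interp_idx(512));
                return 0;
        }
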
index e0918d180f08e6e25c55310963b0a1c4cf7441e7..46f5f29605d474cff41ab302dd9898349addfa3a 100644 (file)
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq);
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
                struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
                /* No concurrent bq_enqueue can run at this point */
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, false);
        }
        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq)
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
-                       xdp_return_frame_rx_napi(xdpf);
+                       if (likely(in_napi_ctx))
+                               xdp_return_frame_rx_napi(xdpf);
+                       else
+                               xdp_return_frame(xdpf);
                }
                processed++;
        }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
        /* Notice, xdp_buff/page MUST be queued here, long enough for
         * driver code invoking us to be finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
                /* Flush all frames in bulkq to real queue */
                bq = this_cpu_ptr(rcpu->bulkq);
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(rcpu->kthread);
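
The new in_napi_ctx flag exists because __cpu_map_entry_free() runs from an RCU callback, not from NAPI, where the per-CPU bulk-return fast path is unsafe; the devmap diff below makes the same distinction for dev_map_flush_old(). The drop path condensed into one helper (both xdp_return_* functions are the real kernel APIs used in the hunks above):

        static void drop_frame(struct xdp_frame *xdpf, bool in_napi_ctx)
        {
                if (likely(in_napi_ctx))
                        xdp_return_frame_rx_napi(xdpf); /* softirq/NAPI only */
                else
                        xdp_return_frame(xdpf);         /* safe in any context */
        }
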
index a7cc7b3494a90f582886485668562ccfef5f5ffd..750d45edae7989e612bc6d57a217dc594b8856bc 100644 (file)
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                      struct xdp_bulk_queue *bq, u32 flags)
+                      struct xdp_bulk_queue *bq, u32 flags,
+                      bool in_napi_ctx)
 {
        struct net_device *dev = obj->dev;
        int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
                struct xdp_frame *xdpf = bq->q[i];
 
                /* RX path under NAPI protection, can return frames faster */
-               xdp_return_frame_rx_napi(xdpf);
+               if (likely(in_napi_ctx))
+                       xdp_return_frame_rx_napi(xdpf);
+               else
+                       xdp_return_frame(xdpf);
                drops++;
        }
        goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
                __clear_bit(bit, bitmap);
 
                bq = this_cpu_ptr(dev->bulkq);
-               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
        }
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
        struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-               bq_xmit_all(obj, bq, 0);
+               bq_xmit_all(obj, bq, 0, true);
 
        /* Ingress dev_rx will be the same for all xdp_frame's in
         * bulk_queue, because bq stored per-CPU and must be flushed
@@ -334,10 +338,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 {
        struct net_device *dev = dst->dev;
        struct xdp_frame *xdpf;
+       int err;
 
        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -345,6 +354,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog)
+{
+       int err;
+
+       err = xdp_ok_fwd_dev(dst->dev, skb->len);
+       if (unlikely(err))
+               return err;
+       skb->dev = dst->dev;
+       generic_xdp_tx(skb, xdp_prog);
+
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
@@ -366,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
                        __clear_bit(dev->bit, bitmap);
 
                        bq = per_cpu_ptr(dev->bulkq, cpu);
-                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
                }
        }
 }
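
Both new enqueue-side checks in devmap follow the same ordering: validate the egress device before the frame is converted or queued, so a rejected packet never enters the bulk queue. Condensed from dev_map_enqueue() and dev_map_generic_redirect() above, with frame_len standing in for xdp->data_end - xdp->data or skb->len:

        err = xdp_ok_fwd_dev(dev, frame_len);   /* device up, MTU large enough? */
        if (unlikely(err))
                return err;
        /* only now: convert_to_xdp_frame() or generic_xdp_tx() */
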
index 3ca2198a6d22d9ab67c61e964811046cf9c9e512..513d9dfcf4ee136dd5e6733789a996612272376d 100644 (file)
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                 * old element will be freed immediately.
                                 * Otherwise return an error
                                 */
-                               atomic_dec(&htab->count);
-                               return ERR_PTR(-E2BIG);
+                               l_new = ERR_PTR(-E2BIG);
+                               goto dec_count;
                        }
                l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
                                     htab->map.numa_node);
-               if (!l_new)
-                       return ERR_PTR(-ENOMEM);
+               if (!l_new) {
+                       l_new = ERR_PTR(-ENOMEM);
+                       goto dec_count;
+               }
        }
 
        memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                                  GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
-                               return ERR_PTR(-ENOMEM);
+                               l_new = ERR_PTR(-ENOMEM);
+                               goto dec_count;
                        }
                }
 
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 
        l_new->hash = hash;
        return l_new;
+dec_count:
+       atomic_dec(&htab->count);
+       return l_new;
 }
 
 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
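
The hashtab fix funnels every post-increment error exit through a single dec_count label so htab->count can never leak. The same shape in standalone C:

        #include <stdatomic.h>
        #include <stdlib.h>

        struct table {
                atomic_long count;
                long max_entries;
        };

        static void *alloc_elem(struct table *t, size_t size)
        {
                void *elem = NULL;

                if (atomic_fetch_add(&t->count, 1) + 1 > t->max_entries)
                        goto dec_count;          /* kernel: ERR_PTR(-E2BIG) */

                elem = malloc(size);
                if (!elem)
                        goto dec_count;          /* kernel: ERR_PTR(-ENOMEM) */

                return elem;

        dec_count:
                atomic_fetch_sub(&t->count, 1);  /* single undo point */
                return elem;                     /* NULL on failure */
        }
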
index 52a91d816c0eb9a1f9fe96fd77b3ffefd6145149..c4d75c52b4fc13d28c53cae1f9d8084b0f8e41e5 100644 (file)
@@ -72,6 +72,7 @@ struct bpf_htab {
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
+       struct rcu_head rcu;
 };
 
 struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
 struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
-       struct htab_elem *hash_link;
-       struct bpf_htab *htab;
+       struct htab_elem __rcu *hash_link;
+       struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
+       spinlock_t maps_lock;
 
        /* Back reference used when sock callback trigger sockmap operations */
        struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 {
@@ -161,7 +164,42 @@ out:
        return !empty;
 }
 
-static struct proto tcp_bpf_proto;
+enum {
+       SOCKMAP_IPV4,
+       SOCKMAP_IPV6,
+       SOCKMAP_NUM_PROTS,
+};
+
+enum {
+       SOCKMAP_BASE,
+       SOCKMAP_TX,
+       SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+                        struct proto *base)
+{
+       prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].close                = bpf_tcp_close;
+       prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
+       prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
+
+       prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
+       prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
+       prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+       int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+       int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+       sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
 static int bpf_tcp_init(struct sock *sk)
 {
        struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
-       if (psock->bpf_tx_msg) {
-               tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
-               tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
-               tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
-               tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+       /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+       if (sk->sk_family == AF_INET6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               spin_lock_bh(&tcpv6_prot_lock);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               spin_unlock_bh(&tcpv6_prot_lock);
        }
-
-       sk->sk_prot = &tcp_bpf_proto;
+       update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
 }
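
build_protos() plus the saved_tcpv6_prot acquire/release pair is a publish-once pattern: rebuild the derived proto table only when the base table's identity changes, double-checking under a lock. A userspace rendering with C11 atomics and a pthread mutex (the types and rebuild hook are stand-ins, not the kernel's):

        #include <stdatomic.h>
        #include <pthread.h>

        struct proto { void (*close)(void); };   /* stand-in for the kernel type */

        static struct proto derived[2];          /* stand-in for bpf_tcp_prots[] */
        static _Atomic(struct proto *) saved_base;
        static pthread_mutex_t build_lock = PTHREAD_MUTEX_INITIALIZER;

        static void build_protos(struct proto d[2], struct proto *base)
        {
                d[0] = *base;    /* copy, then override hooks as in the diff */
                d[1] = d[0];
        }

        void maybe_rebuild(struct proto *base)
        {
                /* fast path; acquire pairs with the release store below,
                 * like smp_load_acquire() in bpf_tcp_init()
                 */
                if (atomic_load_explicit(&saved_base,
                                         memory_order_acquire) == base)
                        return;

                pthread_mutex_lock(&build_lock);
                if (atomic_load_explicit(&saved_base,
                                         memory_order_relaxed) != base) {
                        build_protos(derived, base);
                        atomic_store_explicit(&saved_base, base,
                                              memory_order_release);
                }
                pthread_mutex_unlock(&build_lock);
        }
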
@@ -219,24 +260,64 @@ out:
        rcu_read_unlock();
 }
 
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+                                        u32 hash, void *key, u32 key_size)
+{
+       struct htab_elem *l;
+
+       hlist_for_each_entry_rcu(l, head, hash_node) {
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+       }
+
+       return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &__select_bucket(htab, hash)->head;
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
 }
 
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+                                                 struct smap_psock *psock)
+{
+       struct smap_psock_map_entry *e;
+
+       spin_lock_bh(&psock->maps_lock);
+       e = list_first_entry_or_null(&psock->maps,
+                                    struct smap_psock_map_entry,
+                                    list);
+       if (e)
+               list_del(&e->list);
+       spin_unlock_bh(&psock->maps_lock);
+       return e;
+}
+
 static void bpf_tcp_close(struct sock *sk, long timeout)
 {
        void (*close_fun)(struct sock *sk, long timeout);
-       struct smap_psock_map_entry *e, *tmp;
+       struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;
 
+       lock_sock(sk);
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
+               release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }
 
@@ -247,7 +328,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
         */
        close_fun = psock->save_close;
 
-       write_lock_bh(&sk->sk_callback_lock);
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
@@ -260,21 +340,40 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(md);
        }
 
-       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+       e = psock_map_pop(sk, psock);
+       while (e) {
                if (e->entry) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
-                               list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                } else {
-                       hlist_del_rcu(&e->hash_link->hash_node);
-                       smap_release_sock(psock, e->hash_link->sk);
-                       free_htab_elem(e->htab, e->hash_link);
+                       struct htab_elem *link = rcu_dereference(e->hash_link);
+                       struct bpf_htab *htab = rcu_dereference(e->htab);
+                       struct hlist_head *head;
+                       struct htab_elem *l;
+                       struct bucket *b;
+
+                       b = __select_bucket(htab, link->hash);
+                       head = &b->head;
+                       raw_spin_lock_bh(&b->lock);
+                       l = lookup_elem_raw(head,
+                                           link->hash, link->key,
+                                           htab->map.key_size);
+                       /* If another thread deleted this object, skip deletion.
+                        * The refcnt on psock may or may not be zero.
+                        */
+                       if (l) {
+                               hlist_del_rcu(&link->hash_node);
+                               smap_release_sock(psock, link->sk);
+                               free_htab_elem(htab, link);
+                       }
+                       raw_spin_unlock_bh(&b->lock);
                }
+               e = psock_map_pop(sk, psock);
        }
-       write_unlock_bh(&sk->sk_callback_lock);
        rcu_read_unlock();
+       release_sock(sk);
        close_fun(sk, timeout);
 }
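
bpf_tcp_close() no longer walks psock->maps with list_for_each_entry_safe(); it detaches one entry at a time under the new maps_lock and processes it with the lock dropped, so a concurrent smap_list_*_remove() can never race the traversal. The pop in miniature, as standalone C:

        #include <pthread.h>
        #include <stddef.h>

        struct entry { struct entry *next; };

        struct psock_sketch {
                pthread_mutex_t maps_lock;  /* kernel: spinlock_t maps_lock */
                struct entry *maps;         /* kernel: struct list_head maps */
        };

        static struct entry *map_pop(struct psock_sketch *p)
        {
                struct entry *e;

                pthread_mutex_lock(&p->maps_lock);
                e = p->maps;                /* list_first_entry_or_null() */
                if (e)
                        p->maps = e->next;  /* list_del() */
                pthread_mutex_unlock(&p->maps_lock);
                return e;                   /* processed without the lock */
        }
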
 
@@ -472,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        while (sg[i].length) {
                free += sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
-               put_page(sg_page(&sg[i]));
+               if (!md->skb)
+                       put_page(sg_page(&sg[i]));
                sg[i].length = 0;
                sg[i].page_link = 0;
                sg[i].offset = 0;
@@ -481,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
+       if (md->skb)
+               consume_skb(md->skb);
 
        return free;
 }
@@ -946,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
        while (msg_data_left(msg)) {
-               struct sk_msg_buff *m;
+               struct sk_msg_buff *m = NULL;
                bool enospc = false;
                int copy;
 
                if (sk->sk_err) {
-                       err = sk->sk_err;
+                       err = -sk->sk_err;
                        goto out_err;
                }
 
@@ -1014,8 +1116,11 @@ wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
-               if (err)
+               if (err) {
+                       if (m && m != psock->cork)
+                               free_start_sg(sk, m);
                        goto out_err;
+               }
        }
 out_err:
        if (err < 0)
@@ -1111,8 +1216,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 
 static int bpf_tcp_ulp_register(void)
 {
-       tcp_bpf_proto = tcp_prot;
-       tcp_bpf_proto.close = bpf_tcp_close;
+       build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system. Doing
         * duplicate registrations is not a problem.
@@ -1135,7 +1239,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        preempt_enable();
@@ -1357,7 +1461,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1388,7 +1494,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
         * any socket yet.
         */
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
@@ -1508,6 +1614,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
+       spin_lock_init(&psock->maps_lock);
 
        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
@@ -1564,18 +1671,32 @@ free_stab:
        return ERR_PTR(err);
 }
 
-static void smap_list_remove(struct smap_psock *psock,
-                            struct sock **entry,
-                            struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+                                struct sock **entry)
 {
        struct smap_psock_map_entry *e, *tmp;
 
+       spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-               if (e->entry == entry || e->hash_link == hash_link) {
+               if (e->entry == entry)
                        list_del(&e->list);
-                       break;
-               }
        }
+       spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+                                 struct htab_elem *hash_link)
+{
+       struct smap_psock_map_entry *e, *tmp;
+
+       spin_lock_bh(&psock->maps_lock);
+       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+               struct htab_elem *c = rcu_dereference(e->hash_link);
+
+               if (c == hash_link)
+                       list_del(&e->list);
+       }
+       spin_unlock_bh(&psock->maps_lock);
 }
 
 static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1722,6 @@ static void sock_map_free(struct bpf_map *map)
                if (!sock)
                        continue;
 
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1729,9 @@ static void sock_map_free(struct bpf_map *map)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, &stab->sock_map[i], NULL);
+                       smap_list_map_remove(psock, &stab->sock_map[i]);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
 
@@ -1661,17 +1780,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
        if (!sock)
                return -EINVAL;
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;
 
        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
-       smap_list_remove(psock, &stab->sock_map[k], NULL);
+       smap_list_map_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
 out:
-       write_unlock_bh(&sock->sk_callback_lock);
        return 0;
 }
 
@@ -1752,7 +1869,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                }
        }
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
 
        /* 2. Do not allow inheriting programs if psock exists and has
@@ -1789,7 +1905,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
                if (!e) {
                        err = -ENOMEM;
-                       goto out_progs;
+                       goto out_free;
                }
        }
 
@@ -1809,7 +1925,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                if (err)
                        goto out_free;
                smap_init_progs(psock, verdict, parse);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_start_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
        }
 
        /* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1937,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         */
        if (map_link) {
                e->entry = map_link;
+               spin_lock_bh(&psock->maps_lock);
                list_add_tail(&e->list, &psock->maps);
+               spin_unlock_bh(&psock->maps_lock);
        }
-       write_unlock_bh(&sock->sk_callback_lock);
        return err;
 out_free:
        smap_release_sock(psock, sock);
@@ -1832,7 +1951,6 @@ out_progs:
        }
        if (tx_msg)
                bpf_prog_put(tx_msg);
-       write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
 }
@@ -1869,10 +1987,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);
 
-               write_lock_bh(&osock->sk_callback_lock);
-               smap_list_remove(opsock, &stab->sock_map[i], NULL);
+               smap_list_map_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
-               write_unlock_bh(&osock->sk_callback_lock);
        }
 out:
        return err;
@@ -1915,6 +2031,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
        return 0;
 }
 
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog)
+{
+       int ufd = attr->target_fd;
+       struct bpf_map *map;
+       struct fd f;
+       int err;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       err = sock_map_prog(map, prog, attr->attach_type);
+       fdput(f);
+       return err;
+}
+
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
        return NULL;
@@ -1944,7 +2078,13 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EOPNOTSUPP;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
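
The update path now establishes a fixed lock nesting before touching the map, which is what lets the callee drop sk_callback_lock everywhere else. The same lines as above, annotated:

        lock_sock(skops.sk);      /* sleeping lock, taken outermost */
        preempt_disable();        /* stable CPU for per-CPU state in the callee */
        rcu_read_lock();          /* psock/map pointers are RCU-protected */
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
        rcu_read_unlock();
        preempt_enable();
        release_sock(skops.sk);
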
@@ -2043,14 +2183,13 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
 {
-       return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+       struct bpf_htab *htab;
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
-       return &__select_bucket(htab, hash)->head;
+       htab = container_of(rcu, struct bpf_htab, rcu);
+       bpf_map_area_free(htab->buckets);
+       kfree(htab);
 }
 
 static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2208,18 @@ static void sock_hash_free(struct bpf_map *map)
         */
        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
+               struct bucket *b = __select_bucket(htab, i);
+               struct hlist_head *head;
                struct hlist_node *n;
                struct htab_elem *l;
 
+               raw_spin_lock_bh(&b->lock);
+               head = &b->head;
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        struct sock *sock = l->sk;
                        struct smap_psock *psock;
 
                        hlist_del_rcu(&l->hash_node);
-                       write_lock_bh(&sock->sk_callback_lock);
                        psock = smap_psock_sk(sock);
                        /* This check handles a racing sock event that can get
                         * the sk_callback_lock before this case but after xchg
@@ -2086,16 +2227,15 @@ static void sock_hash_free(struct bpf_map *map)
                         * (psock) to be null and queued for garbage collection.
                         */
                        if (likely(psock)) {
-                               smap_list_remove(psock, NULL, l);
+                               smap_list_hash_remove(psock, l);
                                smap_release_sock(psock, sock);
                        }
-                       write_unlock_bh(&sock->sk_callback_lock);
-                       kfree(l);
+                       free_htab_elem(htab, l);
                }
+               raw_spin_unlock_bh(&b->lock);
        }
        rcu_read_unlock();
-       bpf_map_area_free(htab->buckets);
-       kfree(htab);
+       call_rcu(&htab->rcu, __bpf_htab_free);
 }
 
 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
@@ -2122,19 +2262,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        return l_new;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
-                                        u32 hash, void *key, u32 key_size)
-{
-       struct htab_elem *l;
-
-       hlist_for_each_entry_rcu(l, head, hash_node) {
-               if (l->hash == hash && !memcmp(&l->key, key, key_size))
-                       return l;
-       }
-
-       return NULL;
-}
-
 static inline u32 htab_map_hash(const void *key, u32 key_len)
 {
        return jhash(key, key_len, 0);
@@ -2230,7 +2357,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (err)
                goto err;
 
-       /* bpf_map_update_elem() can be called in_irq() */
+       /* psock is valid here because otherwise above *ctx_update_elem would
+        * have thrown an error. It is safe to skip error check.
+        */
+       psock = smap_psock_sk(sock);
        raw_spin_lock_bh(&b->lock);
        l_old = lookup_elem_raw(head, hash, key, key_size);
        if (l_old && map_flags == BPF_NOEXIST) {
@@ -2248,15 +2378,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                goto bucket_err;
        }
 
-       psock = smap_psock_sk(sock);
-       if (unlikely(!psock)) {
-               err = -EINVAL;
-               goto bucket_err;
-       }
-
-       e->hash_link = l_new;
-       e->htab = container_of(map, struct bpf_htab, map);
+       rcu_assign_pointer(e->hash_link, l_new);
+       rcu_assign_pointer(e->htab,
+                          container_of(map, struct bpf_htab, map));
+       spin_lock_bh(&psock->maps_lock);
        list_add_tail(&e->list, &psock->maps);
+       spin_unlock_bh(&psock->maps_lock);
 
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
@@ -2266,19 +2393,17 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                psock = smap_psock_sk(l_old->sk);
 
                hlist_del_rcu(&l_old->hash_node);
-               smap_list_remove(psock, NULL, l_old);
+               smap_list_hash_remove(psock, l_old);
                smap_release_sock(psock, l_old->sk);
                free_htab_elem(htab, l_old);
        }
        raw_spin_unlock_bh(&b->lock);
        return 0;
 bucket_err:
+       smap_release_sock(psock, sock);
        raw_spin_unlock_bh(&b->lock);
 err:
        kfree(e);
-       psock = smap_psock_sk(sock);
-       if (psock)
-               smap_release_sock(psock, sock);
        return err;
 }
 
@@ -2300,7 +2425,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_hash_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
@@ -2326,7 +2457,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                struct smap_psock *psock;
 
                hlist_del_rcu(&l->hash_node);
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2464,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, NULL, l);
+                       smap_list_hash_remove(psock, l);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -2359,10 +2488,8 @@ struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
        b = __select_bucket(htab, hash);
        head = &b->head;
 
-       raw_spin_lock_bh(&b->lock);
        l = lookup_elem_raw(head, hash, key, key_size);
        sk = l ? l->sk : NULL;
-       raw_spin_unlock_bh(&b->lock);
        return sk;
 }
 
@@ -2383,6 +2510,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_get_next_key = sock_hash_get_next_key,
        .map_update_elem = sock_hash_update_elem,
        .map_delete_elem = sock_hash_delete_elem,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
index 0fa20624707f23b15d3200c1302765a119ee9fc1..b41c6cf2eb883ee208c87a2edce9853c00aa5b84 100644 (file)
@@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
 {
        int refold;
 
-       refold = __atomic_add_unless(&map->refcnt, 1, 0);
+       refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
 
        if (refold >= BPF_MAX_REFCNT) {
                __bpf_map_put(map, false);
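
atomic_fetch_add_unless() is the generic replacement for the removed __atomic_add_unless(); semantically it is "increment unless the counter already holds the given value", here zero, the building block of refcount-get-not-zero. A C11 equivalent:

        #include <stdatomic.h>
        #include <stdbool.h>

        /* Take a reference only if the object is still alive: the CAS
         * loop re-reads the counter on contention, matching
         * atomic_fetch_add_unless(&refcnt, 1, 0) != 0.
         */
        static bool get_unless_zero(atomic_int *refcnt)
        {
                int old = atomic_load_explicit(refcnt, memory_order_relaxed);

                while (old != 0) {
                        if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                                return true;   /* reference taken */
                }
                return false;                  /* already dead, leave it at zero */
        }
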
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_update_elem(map, key, value, attr->flags);
                goto out;
-       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+                  map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+                  map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                goto out;
        }
@@ -1034,14 +1036,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
-               int i;
-
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-
-               for (i = 0; i < prog->aux->func_cnt; i++)
-                       bpf_prog_kallsyms_del(prog->aux->func[i]);
-               bpf_prog_kallsyms_del(prog);
+               bpf_prog_kallsyms_del_all(prog);
 
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
@@ -1147,7 +1144,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 {
        int refold;
 
-       refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+       refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
 
        if (refold >= BPF_MAX_REFCNT) {
                __bpf_prog_put(prog, false);
@@ -1358,9 +1355,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
-       /* eBPF program is ready to be JITed */
-       if (!prog->bpf_func)
-               prog = bpf_prog_select_runtime(prog, &err);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
@@ -1384,6 +1379,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        return err;
 
 free_used_maps:
+       bpf_prog_kallsyms_del_subprogs(prog);
        free_used_maps(prog->aux);
 free_prog:
        bpf_prog_uncharge_memlock(prog);
@@ -1489,8 +1485,6 @@ out_free_tp:
        return err;
 }
 
-#ifdef CONFIG_CGROUP_BPF
-
 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
                                             enum bpf_attach_type attach_type)
 {
@@ -1505,40 +1499,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr,
-                              int type, bool attach)
-{
-       struct bpf_prog *prog = NULL;
-       int ufd = attr->target_fd;
-       struct bpf_map *map;
-       struct fd f;
-       int err;
-
-       f = fdget(ufd);
-       map = __bpf_map_get(f);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-
-       if (attach) {
-               prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
-               if (IS_ERR(prog)) {
-                       fdput(f);
-                       return PTR_ERR(prog);
-               }
-       }
-
-       err = sock_map_prog(map, prog, attr->attach_type);
-       if (err) {
-               fdput(f);
-               if (prog)
-                       bpf_prog_put(prog);
-               return err;
-       }
-
-       fdput(f);
-       return 0;
-}
-
 #define BPF_F_ATTACH_MASK \
        (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
 
@@ -1546,7 +1506,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       struct cgroup *cgrp;
        int ret;
 
        if (!capable(CAP_NET_ADMIN))
@@ -1583,12 +1542,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+               ptype = BPF_PROG_TYPE_SK_MSG;
+               break;
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+               ptype = BPF_PROG_TYPE_SK_SKB;
+               break;
        case BPF_LIRC_MODE2:
-               return lirc_prog_attach(attr);
+               ptype = BPF_PROG_TYPE_LIRC_MODE2;
+               break;
        default:
                return -EINVAL;
        }
@@ -1602,18 +1564,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp)) {
-               bpf_prog_put(prog);
-               return PTR_ERR(cgrp);
+       switch (ptype) {
+       case BPF_PROG_TYPE_SK_SKB:
+       case BPF_PROG_TYPE_SK_MSG:
+               ret = sockmap_get_from_fd(attr, ptype, prog);
+               break;
+       case BPF_PROG_TYPE_LIRC_MODE2:
+               ret = lirc_prog_attach(attr, prog);
+               break;
+       default:
+               ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        }
 
-       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
-                               attr->attach_flags);
        if (ret)
                bpf_prog_put(prog);
-       cgroup_put(cgrp);
-
        return ret;
 }
 
@@ -1622,9 +1586,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
-       struct bpf_prog *prog;
-       struct cgroup *cgrp;
-       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1657,29 +1618,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
        case BPF_LIRC_MODE2:
                return lirc_prog_detach(attr);
        default:
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-
-       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
-       if (IS_ERR(prog))
-               prog = NULL;
-
-       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
-       if (prog)
-               bpf_prog_put(prog);
-       cgroup_put(cgrp);
-       return ret;
+       return cgroup_bpf_prog_detach(attr, ptype);
 }
 
 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1687,9 +1636,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
 {
-       struct cgroup *cgrp;
-       int ret;
-
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
        if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1717,14 +1663,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
        default:
                return -EINVAL;
        }
-       cgrp = cgroup_get_from_fd(attr->query.target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-       ret = cgroup_bpf_query(cgrp, attr, uattr);
-       cgroup_put(cgrp);
-       return ret;
+
+       return cgroup_bpf_prog_query(attr, uattr);
 }
-#endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
@@ -2371,7 +2312,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
@@ -2381,7 +2321,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr);
                break;
-#endif
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
index 9e2bf834f13a21090b566862a853e7567ee03ac9..63aaac52a26553fb29529790cf0350f6dafa504b 100644 (file)
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                if (insn->code != (BPF_JMP | BPF_CALL) ||
                    insn->src_reg != BPF_PSEUDO_CALL)
                        continue;
+               /* Upon error here we cannot fall back to interpreter but
+                * need a hard reject of the program. Thus -EFAULT is
+                * propagated in any case.
+                */
                subprog = find_subprog(env, i + insn->imm + 1);
                if (subprog < 0) {
                        WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 
        func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
        if (!func)
-               return -ENOMEM;
+               goto out_undo_insn;
 
        for (i = 0; i < env->subprog_cnt; i++) {
                subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                tmp = bpf_int_jit_compile(func[i]);
                if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
                        verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
-                       err = -EFAULT;
+                       err = -ENOTSUPP;
                        goto out_free;
                }
                cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
                if (func[i])
                        bpf_jit_free(func[i]);
        kfree(func);
+out_undo_insn:
        /* cleanup main prog to be interpreted */
        prog->jit_requested = 0;
        for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
                err = jit_subprogs(env);
                if (err == 0)
                        return 0;
+               if (err == -EFAULT)
+                       return err;
        }
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        for (i = 0; i < prog->len; i++, insn++) {
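
After this change jit_subprogs() distinguishes "verifier bug" from "JIT limitation": -EFAULT hard-rejects the program, while -ENOTSUPP (and allocation failures) fall back to the interpreter where CONFIG_BPF_JIT_ALWAYS_ON permits. The dispatch in fixup_call_args(), annotated:

        err = jit_subprogs(env);
        if (err == 0)
                return 0;       /* subprogs JITed, calls patched */
        if (err == -EFAULT)
                return err;     /* internal inconsistency: hard reject */
        /* any other error: fall through and rewrite the pseudo calls
         * for the interpreter (only possible without JIT_ALWAYS_ON)
         */
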
index 702aa846ddacabde1fdf4c0e29c491f5c9c10d00..8e40efc2928a113231fe09c9baaa99c2d77db728 100644 (file)
@@ -324,35 +324,6 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
        return ret;
 }
 
-/* Todo: Delete these extern declarations when get/put_compat_itimerspec64()
- * are moved to kernel/time/time.c .
- */
-extern int __compat_get_timespec64(struct timespec64 *ts64,
-                                  const struct compat_timespec __user *cts);
-extern int __compat_put_timespec64(const struct timespec64 *ts64,
-                                  struct compat_timespec __user *cts);
-
-int get_compat_itimerspec64(struct itimerspec64 *its,
-                       const struct compat_itimerspec __user *uits)
-{
-
-       if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
-           __compat_get_timespec64(&its->it_value, &uits->it_value))
-               return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
-
-int put_compat_itimerspec64(const struct itimerspec64 *its,
-                       struct compat_itimerspec __user *uits)
-{
-       if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
-           __compat_put_timespec64(&its->it_value, &uits->it_value))
-               return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
-
 /*
  * We currently only need the following fields from the sigevent
  * structure: sigev_value, sigev_signo, sig_notify and (sometimes
index 0db8938fbb236e58284927040e19998718ae74e7..dd8634dde1ae683d117c7f35f88ccdcdf34df79c 100644 (file)
@@ -1274,7 +1274,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
         * otherwise a RCU stall occurs.
         */
        [CPUHP_TIMERS_PREPARE] = {
-               .name                   = "timers:dead",
+               .name                   = "timers:prepare",
                .startup.single         = timers_prepare_cpu,
                .teardown.single        = timers_dead_cpu,
        },
@@ -1344,6 +1344,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
+       [CPUHP_AP_WATCHDOG_ONLINE] = {
+               .name                   = "lockup_detector:online",
+               .startup.single         = lockup_detector_online_cpu,
+               .teardown.single        = lockup_detector_offline_cpu,
+       },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name                   = "workqueue:online",
                .startup.single         = workqueue_online_cpu,
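Static entries such as the new lockup_detector pair are run directly by the
hotplug core. For comparison, a driver hooks the same machinery through the
dynamic API (a minimal sketch; the mydrv_* callbacks are hypothetical):

        /* returns the allocated dynamic state (>= 0) or a negative error */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                mydrv_online_cpu, mydrv_offline_cpu);
        if (ret < 0)
                return ret;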
@@ -2010,7 +2015,7 @@ void __init boot_cpu_init(void)
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
 {
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644 (file)
index 0000000..9bd5430
--- /dev/null
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+       bool
+       depends on !NO_DMA
+       default y
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config NEED_DMA_MAP_STATE
+       bool
+
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+       bool
+       select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+       bool
+       depends on HAS_DMA
+       select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+       bool
+       depends on HAS_DMA
+
+config SWIOTLB
+       bool
+       select DMA_DIRECT_OPS
+       select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644 (file)
index 0000000..6de44e4
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA)                  += mapping.o
+obj-$(CONFIG_DMA_CMA)                  += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
+obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
+obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
+
similarity index 100%
rename from lib/dma-debug.c
rename to kernel/dma/debug.c
similarity index 100%
rename from lib/dma-direct.c
rename to kernel/dma/direct.c
similarity index 99%
rename from drivers/base/dma-mapping.c
rename to kernel/dma/mapping.c
index f831a582209c63b7412a6a4276ed7f1205371610..d2a92ddaac4d14c8683433856672fddbace7a4c9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
+ * arch-independent dma-mapping routines
  *
  * Copyright (c) 2006  SUSE Linux Products GmbH
  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
similarity index 92%
rename from lib/dma-noncoherent.c
rename to kernel/dma/noncoherent.c
index 79e9a757387f6fa255c738d58257f4d38649fa63..031fe235d958a1d8f1c0a212cfb1ba6e760dc4ba 100644 (file)
@@ -49,11 +49,13 @@ static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
        return nents;
 }
 
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+       arch_sync_dma_for_cpu_all(dev);
 }
 
 static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
@@ -64,6 +66,7 @@ static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
 
        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+       arch_sync_dma_for_cpu_all(dev);
 }
 
 static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
@@ -89,7 +92,8 @@ const struct dma_map_ops dma_noncoherent_ops = {
        .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
        .map_page               = dma_noncoherent_map_page,
        .map_sg                 = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
        .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
        .unmap_page             = dma_noncoherent_unmap_page,
similarity index 99%
rename from lib/swiotlb.c
rename to kernel/dma/swiotlb.c
index 04b68d9dfface72cd56d1f48ec0a58c68d2c2032..904541055792bf227faaf49734cd737142fd4111 100644 (file)
@@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = {
        .unmap_page             = swiotlb_unmap_page,
        .dma_supported          = dma_direct_supported,
 };
+EXPORT_SYMBOL(swiotlb_dma_ops);
similarity index 98%
rename from lib/dma-virt.c
rename to kernel/dma/virt.c
index 8e61a02ef9ca06cb2aabfaef7484a44f31610ef1..631ddec4b60a8b94576b1c3a36db71a818bff5d8 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *     lib/dma-virt.c
- *
  * DMA operations that map to virtual addresses without flushing memory.
  */
 #include <linux/export.h>
index 80cca2b30c4fe02c1baca59b08c0cae158a7368e..f6ea33a9f9049aa811fd3289d91b9b750557a52d 100644 (file)
@@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event)
                                typeof(*event), group_node))
 
 /*
- * Add a event from the lists for its context.
+ * Add an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event)
 }
 
 /*
- * Remove a event from the lists for its context.
+ * Remove an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event,
 }
 
 /*
- * Disable a event.
+ * Disable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event,
 }
 
 /*
- * Enable a event.
+ * Enable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info)
         * events will refuse to restart because of rb::aux_mmap_count==0,
         * see comments in perf_aux_output_begin().
         *
-        * Since this is happening on a event-local CPU, no trace is lost
+        * Since this is happening on an event-local CPU, no trace is lost
         * while restarting.
         */
        if (sd->restart)
@@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
        int ret;
 
        /*
-        * Return end-of-file for a read on a event that is in
+        * Return end-of-file for a read on an event that is in
         * error state (i.e. because it was pinned but it couldn't be
         * scheduled on to the CPU at some point).
         */
@@ -5273,11 +5273,11 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 
-static int perf_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 {
        struct perf_event *event = vmf->vma->vm_file->private_data;
        struct ring_buffer *rb;
-       int ret = VM_FAULT_SIGBUS;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
 
        if (vmf->flags & FAULT_FLAG_MKWRITE) {
                if (vmf->pgoff == 0)
@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)
 
 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
 
-static struct perf_callchain_entry *
+struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs)
 {
        bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                int size = 1;
 
-               data->callchain = perf_callchain(event, regs);
+               if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+                       data->callchain = perf_callchain(event, regs);
+
                size += data->callchain->nr;
 
                header->size += size * sizeof(u64);
@@ -6482,7 +6484,7 @@ void perf_prepare_sample(struct perf_event_header *header,
                data->phys_addr = perf_virt_to_phys(data->addr);
 }
 
-static void __always_inline
+static __always_inline void
 __perf_event_output(struct perf_event *event,
                    struct perf_sample_data *data,
                    struct pt_regs *regs,
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
                                     struct file *file, unsigned long offset,
                                     unsigned long size)
 {
+       /* d_inode(NULL) won't be equal to any mapped user-space file */
+       if (!filter->path.dentry)
+               return false;
+
        if (d_inode(filter->path.dentry) != file_inode(file))
                return false;
 
@@ -9898,7 +9904,7 @@ enabled:
 }
 
 /*
- * Allocate and initialize a event structure
+ * Allocate and initialize an event structure
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
@@ -11229,7 +11235,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * Inherit a event from parent task to child task.
+ * Inherit an event from parent task to child task.
  *
  * Returns:
  *  - valid pointer on success
index 6e28d2866be54afe5ab37d39ccc06f1ee0e8b713..b3814fce5ecb6bf7729ea858ec17ddb9018105f1 100644 (file)
@@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp)
        mutex_unlock(&nr_bp_mutex);
 }
 
-static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
        int err;
 
        __release_bp_slot(bp, old_type);
 
-       err = __reserve_bp_slot(bp, bp->attr.bp_type);
+       err = __reserve_bp_slot(bp, new_type);
        if (err) {
                /*
                 * Reserve the old_type slot back in case
@@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
        return err;
 }
 
-static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
        int ret;
 
        mutex_lock(&nr_bp_mutex);
-       ret = __modify_bp_slot(bp, old_type);
+       ret = __modify_bp_slot(bp, old_type, new_type);
        mutex_unlock(&nr_bp_mutex);
        return ret;
 }
@@ -400,16 +400,18 @@ int dbg_release_bp_slot(struct perf_event *bp)
        return 0;
 }
 
-static int validate_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_parse(struct perf_event *bp,
+                              const struct perf_event_attr *attr,
+                              struct arch_hw_breakpoint *hw)
 {
-       int ret;
+       int err;
 
-       ret = arch_validate_hwbkpt_settings(bp);
-       if (ret)
-               return ret;
+       err = hw_breakpoint_arch_parse(bp, attr, hw);
+       if (err)
+               return err;
 
-       if (arch_check_bp_in_kernelspace(bp)) {
-               if (bp->attr.exclude_kernel)
+       if (arch_check_bp_in_kernelspace(hw)) {
+               if (attr->exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
@@ -424,19 +426,22 @@ static int validate_hw_breakpoint(struct perf_event *bp)
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-       int ret;
-
-       ret = reserve_bp_slot(bp);
-       if (ret)
-               return ret;
+       struct arch_hw_breakpoint hw;
+       int err;
 
-       ret = validate_hw_breakpoint(bp);
+       err = reserve_bp_slot(bp);
+       if (err)
+               return err;
 
-       /* if arch_validate_hwbkpt_settings() fails then release bp slot */
-       if (ret)
+       err = hw_breakpoint_parse(bp, &bp->attr, &hw);
+       if (err) {
                release_bp_slot(bp);
+               return err;
+       }
 
-       return ret;
+       bp->hw.info = hw;
+
+       return 0;
 }
 
 /**
@@ -456,35 +461,44 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
+static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
+                                   struct perf_event_attr *from)
+{
+       to->bp_addr = from->bp_addr;
+       to->bp_type = from->bp_type;
+       to->bp_len  = from->bp_len;
+       to->disabled = from->disabled;
+}
+
 int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                                bool check)
 {
-       u64 old_addr = bp->attr.bp_addr;
-       u64 old_len  = bp->attr.bp_len;
-       int old_type = bp->attr.bp_type;
-       bool modify  = attr->bp_type != old_type;
-       int err = 0;
+       struct arch_hw_breakpoint hw;
+       int err;
 
-       bp->attr.bp_addr = attr->bp_addr;
-       bp->attr.bp_type = attr->bp_type;
-       bp->attr.bp_len  = attr->bp_len;
+       err = hw_breakpoint_parse(bp, attr, &hw);
+       if (err)
+               return err;
 
-       if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
-               return -EINVAL;
+       if (check) {
+               struct perf_event_attr old_attr;
 
-       err = validate_hw_breakpoint(bp);
-       if (!err && modify)
-               err = modify_bp_slot(bp, old_type);
+               old_attr = bp->attr;
+               hw_breakpoint_copy_attr(&old_attr, attr);
+               if (memcmp(&old_attr, attr, sizeof(*attr)))
+                       return -EINVAL;
+       }
 
-       if (err) {
-               bp->attr.bp_addr = old_addr;
-               bp->attr.bp_type = old_type;
-               bp->attr.bp_len  = old_len;
-               return err;
+       if (bp->attr.bp_type != attr->bp_type) {
+               err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
+               if (err)
+                       return err;
        }
 
-       bp->attr.disabled = attr->disabled;
+       hw_breakpoint_copy_attr(&bp->attr, attr);
+       bp->hw.info = hw;
+
        return 0;
 }
 
index 045a37e9ddee3255fac6ab34b80682f4e2e968e1..5d3cf407e37469a7b1cafab8c4af303d074bbdf8 100644 (file)
@@ -103,7 +103,7 @@ out:
        preempt_enable();
 }
 
-static bool __always_inline
+static __always_inline bool
 ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
                return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
-static int __always_inline
+static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
@@ -414,7 +414,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 {
        if (rb->aux_overwrite)
                return false;
index ccc579a7d32e0c8a11835a19aee17b71d1375b8a..aed1ba56995440b6640c530f0332f1f4ce28b220 100644 (file)
@@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 EXPORT_SYMBOL_GPL(uprobe_register);
 
 /*
- * uprobe_apply - unregister a already registered probe.
+ * uprobe_apply - unregister an already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @uc: consumer which wants to add more or remove some breakpoints
@@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
 }
 
 /*
- * uprobe_unregister - unregister a already registered probe.
+ * uprobe_unregister - unregister an already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @uc: identify which probe if multiple probes are colocated.
@@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri)
 
 /*
  * Called with no locks held.
- * Called in context of a exiting or a exec-ing thread.
+ * Called in context of an exiting or an exec-ing thread.
  */
 void uprobe_free_utask(struct task_struct *t)
 {
index 5349c91c22983c4b9dc0d362aacf9579a705cf70..bc80a4e268c0bc27132ecb782df98276b282b237 100644 (file)
@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
        if (should_fail(&fei_fault_attr, 1)) {
                regs_set_return_value(regs, attr->retval);
                override_function_with_return(regs);
-               /* Kprobe specific fixup */
-               reset_current_kprobe();
-               preempt_enable_no_resched();
                return 1;
        }
 
index 9440d61b925ca08faa3beb7d3a02fedfdd0a9b84..9d8d0e016fc6c2cb54bc90bfadd8725612174b78 100644 (file)
@@ -303,11 +303,36 @@ struct kmem_cache *files_cachep;
 struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-struct kmem_cache *vm_area_cachep;
+static struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+       if (vma)
+               vma_init(vma, mm);
+       return vma;
+}
+
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+       struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+
+       if (new) {
+               *new = *orig;
+               INIT_LIST_HEAD(&new->anon_vma_chain);
+       }
+       return new;
+}
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+       kmem_cache_free(vm_area_cachep, vma);
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        void *stack = task_stack_page(tsk);
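With these helpers, call sites that used to open-code the kmem_cache
operations (as dup_mmap() does in the following hunk) reduce to a plain
alloc/free pairing; a minimal usage sketch, with field setup elided:

        struct vm_area_struct *vma;

        vma = vm_area_alloc(mm);        /* zeroed and vma_init()-ed */
        if (!vma)
                return -ENOMEM;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        /* set vm_flags, vm_page_prot, vm_ops, ... as needed */
        vm_area_free(vma);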
@@ -455,11 +480,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               tmp = vm_area_dup(mpnt);
                if (!tmp)
                        goto fail_nomem;
-               *tmp = *mpnt;
-               INIT_LIST_HEAD(&tmp->anon_vma_chain);
                retval = vma_dup_policy(mpnt, tmp);
                if (retval)
                        goto fail_nomem_policy;
@@ -539,7 +562,7 @@ fail_uprobe_end:
 fail_nomem_anon_vma_fork:
        mpol_put(vma_policy(tmp));
 fail_nomem_policy:
-       kmem_cache_free(vm_area_cachep, tmp);
+       vm_area_free(tmp);
 fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
@@ -2253,6 +2276,8 @@ static void sighand_ctor(void *data)
 
 void __init proc_caches_init(void)
 {
+       unsigned int mm_size;
+
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2269,15 +2294,16 @@ void __init proc_caches_init(void)
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
+
        /*
-        * FIXME! The "sizeof(struct mm_struct)" currently includes the
-        * whole struct cpumask for the OFFSTACK case. We could change
-        * this to *only* allocate as much of it as required by the
-        * maximum number of CPU's we can ever have.  The cpumask_allocation
-        * is at the end of the structure, exactly for that reason.
+        * The mm_cpumask is located at the end of mm_struct, and is
+        * dynamically sized based on the maximum CPU number this system
+        * can have, taking hotplug into account (nr_cpu_ids).
         */
+       mm_size = sizeof(struct mm_struct) + cpumask_size();
+
        mm_cachep = kmem_cache_create_usercopy("mm_struct",
-                       sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        offsetof(struct mm_struct, saved_auxv),
                        sizeof_field(struct mm_struct, saved_auxv),
index c6766f326072d54322d460cb54afe225b6754ac5..5f3e2baefca92e45442b9aa4a2b1399bbf0ce21e 100644 (file)
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
 endmenu
 
 config GENERIC_IRQ_MULTI_HANDLER
-       depends on !MULTI_IRQ_HANDLER
        bool
        help
          Allow to specify the low level IRQ handler at run time.
index 4dadeb3d666621239a7273f7651847fa7099dacf..6f636136cccc05993e20034e92effc0c0fc3e7e2 100644 (file)
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+       BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
index afc7f902d74a3eff5d1f8a5e407159c5d28625f7..578d0e5f1b5b65a147aaf7fff037761ff091f4f2 100644 (file)
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
+        * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
 }
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
  * kstat_irqs_usr - Get the statistics for an interrupt
  * @irq:       The interrupt number
  *
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. Contrary to kstat_irqs() this can be called from any
- * preemptible context. It's protected against concurrent removal of
- * an interrupt descriptor when sparse irqs are enabled.
+ * Returns the sum of interrupt counts on all cpus since boot for @irq.
+ * Contrary to kstat_irqs() this can be called from any context.
+ * It uses RCU, since a concurrent removal of an interrupt descriptor has
+ * to observe an RCU grace period before delayed_free_desc()/irq_kobj_release().
  */
 unsigned int kstat_irqs_usr(unsigned int irq)
 {
        unsigned int sum;
 
-       irq_lock_sparse();
+       rcu_read_lock();
        sum = kstat_irqs(irq);
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return sum;
 }
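Since a descriptor is now freed only after an RCU grace period, callers no
longer serialize against sparse-irq updates; summing counts is safe from
any context (a sketch of the typical /proc-style caller):

        unsigned int j, sum = 0;

        for_each_irq_nr(j)
                sum += kstat_irqs_usr(j);  /* takes rcu_read_lock() itself */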
index daeabd791d5896a366d7ba7b386dde76bfbc0a73..fb86146037a745e85a862771f437866d9ab31707 100644 (file)
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
-       set_current_state(TASK_INTERRUPTIBLE);
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop()) {
+               if (kthread_should_stop()) {
+                       /* may need to run one last time */
+                       if (test_and_clear_bit(IRQTF_RUNTHREAD,
+                                              &action->thread_flags)) {
+                               __set_current_state(TASK_RUNNING);
+                               return 0;
+                       }
+                       __set_current_state(TASK_RUNNING);
+                       return -1;
+               }
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
        }
-       __set_current_state(TASK_RUNNING);
-       return -1;
 }
 
 /*
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
        /*
         * This is the regular exit path. __free_irq() is stopping the
         * thread via kthread_stop() after calling
-        * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
-        * oneshot mask bit can be set. We cannot verify that as we
-        * cannot touch the oneshot mask at this point anymore as
-        * __setup_irq() might have given out currents thread_mask
-        * again.
+        * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+        * oneshot mask bit can be set.
         */
        task_work_cancel(current, irq_thread_dtor);
        return 0;
@@ -1068,6 +1072,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
 
+       /*
+        * No further action is required for interrupts which are already
+        * requested as threaded interrupts.
+        */
+       if (new->handler == irq_default_primary_handler)
+               return 0;
+
        new->flags |= IRQF_ONESHOT;
 
        /*
@@ -1075,7 +1086,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
         * thread handler. We force thread them as well by creating a
         * secondary action.
         */
-       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+       if (new->handler && new->thread_fn) {
                /* Allocate the secondary action */
                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
                if (!new->secondary)
@@ -1244,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
        /*
         * Protects against a concurrent __free_irq() call which might wait
-        * for synchronize_irq() to complete without holding the optional
-        * chip bus lock and desc->lock.
+        * for synchronize_hardirq() to complete without holding the optional
+        * chip bus lock and desc->lock. Also protects against handing out
+        * a recycled oneshot thread_mask bit while it's still in use by
+        * its previous owner.
         */
        mutex_lock(&desc->request_mutex);
 
@@ -1564,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
-       if (!desc)
-               return NULL;
-
        mutex_lock(&desc->request_mutex);
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1613,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        /*
         * Drop bus_lock here so the changes which were done in the chip
         * callbacks above are synced out to the irq chips which hang
-        * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+        * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
         *
         * Aside of that the bus_lock can also be taken from the threaded
         * handler in irq_finalize_oneshot() which results in a deadlock
-        * because synchronize_irq() would wait forever for the thread to
+        * because kthread_stop() would wait forever for the thread to
         * complete, which is blocked on the bus lock.
         *
         * The still held desc->request_mutex() protects against a
@@ -1629,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        unregister_handler_proc(irq, action);
 
        /* Make sure it's not being used on another CPU: */
-       synchronize_irq(irq);
+       synchronize_hardirq(irq);
 
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
@@ -1638,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
-        *   'real' IRQ doesn't run in parallel with our fake. )
+        *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
@@ -1647,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        }
 #endif
 
+       /*
+        * The action has already been removed above, but the thread writes
+        * its oneshot mask bit when it completes. The request_mutex is
+        * held across this, though, which prevents __setup_irq() from
+        * handing out the same bit to a newly requested action.
+        */
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
index 37eda10f5c362fb173f4280fe77fd24ebc8c195f..da9addb8d655719cbd67a59e5d8535670e28f885 100644 (file)
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 
-       irq_lock_sparse();
+       rcu_read_lock();
        desc = irq_to_desc(i);
        if (!desc)
                goto outsparse;
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       for_each_online_cpu(j)
-               any_count |= kstat_irqs_cpu(i, j);
-       action = desc->action;
-       if ((!action || irq_desc_is_chained(desc)) && !any_count)
-               goto out;
+       if (desc->kstat_irqs)
+               for_each_online_cpu(j)
+                       any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+       if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+               goto outsparse;
 
        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+               seq_printf(p, "%10u ", desc->kstat_irqs ?
+                                       *per_cpu_ptr(desc->kstat_irqs, j) : 0);
 
+       raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);
 
+       action = desc->action;
        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        seq_putc(p, '\n');
-out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 outsparse:
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return 0;
 }
 #endif
index ea619021d9011dca88f0aa10e3263f7dc2177394..ab257be4d92495a52c780d45c519eea608225c9e 100644 (file)
@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
            (kprobe_disabled(p) || kprobes_all_disarmed))
                return;
 
-       /* Both of break_handler and post_handler are not supported. */
-       if (p->break_handler || p->post_handler)
+       /* kprobes with a post_handler cannot be optimized */
+       if (p->post_handler)
                return;
 
        op = container_of(p, struct optimized_kprobe, kp);
@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
         * there is still a relative jump) and disabled.
         */
        op = container_of(ap, struct optimized_kprobe, kp);
-       if (unlikely(list_empty(&op->list)))
-               printk(KERN_WARNING "Warning: found a stray unused "
-                       "aggrprobe@%p\n", ap->addr);
+       WARN_ON_ONCE(list_empty(&op->list));
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                                   (unsigned long)p->addr, 0, 0);
        if (ret) {
-               pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+               pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+                        p->addr, ret);
                return ret;
        }
 
@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
 
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
-       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
+                 p->addr, ret);
        return ret;
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 }
 NOKPROBE_SYMBOL(aggr_fault_handler);
 
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe *cur = __this_cpu_read(kprobe_instance);
-       int ret = 0;
-
-       if (cur && cur->break_handler) {
-               if (cur->break_handler(cur, regs))
-                       ret = 1;
-       }
-       reset_kprobe_instance();
-       return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
 /* Walks the list and increments nmissed count for multiprobe case */
 void kprobes_inc_nmissed_count(struct kprobe *p)
 {
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
 
-/*
-* Add the new probe to ap->list. Fail if this is the
-* second jprobe at the address - two jprobes can't coexist
-*/
+/* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
-       if (p->break_handler || p->post_handler)
+       if (p->post_handler)
                unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
 
-       if (p->break_handler) {
-               if (ap->break_handler)
-                       return -EEXIST;
-               list_add_tail_rcu(&p->list, &ap->list);
-               ap->break_handler = aggr_break_handler;
-       } else
-               list_add_rcu(&p->list, &ap->list);
+       list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;
 
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
        /* We don't care the kprobe which has gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
-       if (p->break_handler && !kprobe_gone(p))
-               ap->break_handler = aggr_break_handler;
 
        INIT_LIST_HEAD(&ap->list);
        INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
                goto disarmed;
        else {
                /* If disabling probe has special handlers, update aggrprobe */
-               if (p->break_handler && !kprobe_gone(p))
-                       ap->break_handler = NULL;
                if (p->post_handler && !kprobe_gone(p)) {
                        list_for_each_entry_rcu(list_p, &ap->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
@@ -1812,77 +1785,6 @@ unsigned long __weak arch_deref_entry_point(void *entry)
        return (unsigned long)entry;
 }
 
-#if 0
-int register_jprobes(struct jprobe **jps, int num)
-{
-       int ret = 0, i;
-
-       if (num <= 0)
-               return -EINVAL;
-
-       for (i = 0; i < num; i++) {
-               ret = register_jprobe(jps[i]);
-
-               if (ret < 0) {
-                       if (i > 0)
-                               unregister_jprobes(jps, i);
-                       break;
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(register_jprobes);
-
-int register_jprobe(struct jprobe *jp)
-{
-       unsigned long addr, offset;
-       struct kprobe *kp = &jp->kp;
-
-       /*
-        * Verify probepoint as well as the jprobe handler are
-        * valid function entry points.
-        */
-       addr = arch_deref_entry_point(jp->entry);
-
-       if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
-           kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
-               kp->pre_handler = setjmp_pre_handler;
-               kp->break_handler = longjmp_break_handler;
-               return register_kprobe(kp);
-       }
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(register_jprobe);
-
-void unregister_jprobe(struct jprobe *jp)
-{
-       unregister_jprobes(&jp, 1);
-}
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-
-void unregister_jprobes(struct jprobe **jps, int num)
-{
-       int i;
-
-       if (num <= 0)
-               return;
-       mutex_lock(&kprobe_mutex);
-       for (i = 0; i < num; i++)
-               if (__unregister_kprobe_top(&jps[i]->kp) < 0)
-                       jps[i]->kp.addr = NULL;
-       mutex_unlock(&kprobe_mutex);
-
-       synchronize_sched();
-       for (i = 0; i < num; i++) {
-               if (jps[i]->kp.addr)
-                       __unregister_kprobe_bottom(&jps[i]->kp);
-       }
-}
-EXPORT_SYMBOL_GPL(unregister_jprobes);
-#endif
-
 #ifdef CONFIG_KRETPROBES
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
@@ -1982,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
-       rp->kp.break_handler = NULL;
 
        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
@@ -2105,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
-               p->break_handler = NULL;
                kill_optimized_kprobe(p);
        }
        /*
@@ -2169,11 +2069,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
+/* Caller must NOT call this in the usual path; it is only for critical cases */
 void dump_kprobe(struct kprobe *kp)
 {
-       printk(KERN_WARNING "Dumping kprobe:\n");
-       printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
-              kp->symbol_name, kp->addr, kp->offset);
+       pr_err("Dumping kprobe:\n");
+       pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+              kp->symbol_name, kp->offset, kp->addr);
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
@@ -2196,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
                entry = arch_deref_entry_point((void *)*iter);
 
                if (!kernel_text_address(entry) ||
-                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
-                       pr_err("Failed to find blacklist at %p\n",
-                               (void *)entry);
+                   !kallsyms_lookup_size_offset(entry, &size, &offset))
                        continue;
-               }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
@@ -2326,21 +2224,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname, struct kprobe *pp)
 {
        char *kprobe_type;
+       void *addr = p->addr;
 
        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
-       else if (p->pre_handler == setjmp_pre_handler)
-               kprobe_type = "j";
        else
                kprobe_type = "k";
 
+       if (!kallsyms_show_value())
+               addr = NULL;
+
        if (sym)
-               seq_printf(pi, "%p  %s  %s+0x%x  %s ",
-                       p->addr, kprobe_type, sym, offset,
+               seq_printf(pi, "%px  %s  %s+0x%x  %s ",
+                       addr, kprobe_type, sym, offset,
                        (modname ? modname : " "));
-       else
-               seq_printf(pi, "%p  %s  %p ",
-                       p->addr, kprobe_type, p->addr);
+       else    /* try to use %pS */
+               seq_printf(pi, "%px  %s  %pS ",
+                       addr, kprobe_type, p->addr);
 
        if (!pp)
                pp = p;
@@ -2428,8 +2328,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
        struct kprobe_blacklist_entry *ent =
                list_entry(v, struct kprobe_blacklist_entry, list);
 
-       seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
-                  (void *)ent->end_addr, (void *)ent->start_addr);
+       /*
+        * If /proc/kallsyms is not showing kernel addresses, we won't
+        * show them here either.
+        */
+       if (!kallsyms_show_value())
+               seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+                          (void *)ent->start_addr);
+       else
+               seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+                          (void *)ent->end_addr, (void *)ent->start_addr);
        return 0;
 }
 
@@ -2611,7 +2519,7 @@ static int __init debugfs_kprobe_init(void)
        if (!dir)
                return -ENOMEM;
 
-       file = debugfs_create_file("list", 0444, dir, NULL,
+       file = debugfs_create_file("list", 0400, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file)
                goto error;
@@ -2621,7 +2529,7 @@ static int __init debugfs_kprobe_init(void)
        if (!file)
                goto error;
 
-       file = debugfs_create_file("blacklist", 0444, dir, NULL,
+       file = debugfs_create_file("blacklist", 0400, dir, NULL,
                                &debugfs_kprobe_blacklist_ops);
        if (!file)
                goto error;
@@ -2637,6 +2545,3 @@ late_initcall(debugfs_kprobe_init);
 #endif /* CONFIG_DEBUG_FS */
 
 module_init(init_kprobes);
-
-/* defined in arch/.../kernel/kprobes.c */
-EXPORT_SYMBOL_GPL(jprobe_return);
index 481951bf091d49fbe4378bb21504b6482e11919f..087d18d771b537972b4bacf8c5528a3c40fc91e5 100644 (file)
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task)
 static void __kthread_parkme(struct kthread *self)
 {
        for (;;) {
-               set_current_state(TASK_PARKED);
+               /*
+                * TASK_PARKED is a special state; we must serialize against
+                * possible pending wakeups to avoid store-store collisions on
+                * task->state.
+                *
+                * Such a collision might possibly result in the task state
+                * changing from TASK_PARKED and us failing the
+                * wait_task_inactive() in kthread_park().
+                */
+               set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;
+
+               complete(&self->parked);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -191,11 +202,6 @@ void kthread_parkme(void)
 }
 EXPORT_SYMBOL_GPL(kthread_parkme);
 
-void kthread_park_complete(struct task_struct *k)
-{
-       complete_all(&to_kthread(k)->parked);
-}
-
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -319,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
+               char name[TASK_COMM_LEN];
 
-               vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+               /*
+                * task is already visible to other tasks, so updating
+                * COMM must be protected.
+                */
+               vsnprintf(name, sizeof(name), namefmt, args);
+               set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
@@ -459,8 +471,10 @@ void kthread_unpark(struct task_struct *k)
        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                __kthread_bind(k, kthread->cpu, TASK_PARKED);
 
-       reinit_completion(&kthread->parked);
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
+        */
        wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -484,10 +498,22 @@ int kthread_park(struct task_struct *k)
        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;
 
+       if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
+               return -EBUSY;
+
        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
+               /*
+                * Wait for __kthread_parkme() to complete(); this means we
+                * _will_ have TASK_PARKED and are about to call schedule().
+                */
                wait_for_completion(&kthread->parked);
+               /*
+                * Now wait for that schedule() to complete and the task to
+                * get scheduled out.
+                */
+               WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }
 
        return 0;
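The reworked protocol is a strict two-step handshake; the hunks above
condense into one timeline (a summary sketch, not additional kernel code):

        /*
         * parker: kthread_park()            parkee: __kthread_parkme()
         * ----------------------            --------------------------
         * set_bit(KTHREAD_SHOULD_PARK)
         * wake_up_process(k)                set_special_state(TASK_PARKED)
         *                                   sees KTHREAD_SHOULD_PARK set
         * wait_for_completion(&parked) <==  complete(&self->parked)
         *                                   schedule()
         * wait_task_inactive(k, TASK_PARKED)
         *   -> the task is TASK_PARKED and has been scheduled out
         */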
index edcac5de7ebcdb489113800c941274d8887f9b56..5fa4d3138bf106cd87f822636652c144e846f3aa 100644 (file)
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        if (unlikely(!debug_locks))
                return;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                break;
        }
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
index 8402b3349dca40a53a82a34900165c1054ae2fff..57bef4fbfb31cb65788caaa2e8ee654378aa3246 100644 (file)
@@ -21,6 +21,9 @@
  *          Davidlohr Bueso <dave@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
 torture_param(int, stat_interval, 60,
             "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "spin_lock";
index 4f014be7a4b8bf9a4de974377592374c75031cab..2823d4163a37c28dcbcfc18994432eeacfbe5f0d 100644 (file)
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
                rt_mutex_postunlock(&wake_q);
 }
 
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+       might_sleep();
+
+       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+       __rt_mutex_lock(lock, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+#endif
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-       might_sleep();
-
-       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+       __rt_mutex_lock(lock, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
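The _nested variant exists only under CONFIG_DEBUG_LOCK_ALLOC, where it
documents an intentional nesting order to lockdep; a usage sketch (parent
and child are hypothetical objects, SINGLE_DEPTH_NESTING the conventional
subclass for one level of nesting):

        rt_mutex_lock(&parent->lock);
        rt_mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
        /* critical section touching both objects */
        rt_mutex_unlock(&child->lock);
        rt_mutex_unlock(&parent->lock);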
index bc1e507be9ff7aea311261e78002d53375f9a6d7..776308d2fa9e9468116f0174eed4b8062475a83f 100644 (file)
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
        might_sleep();
 
        __down_read(sem);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
index 5857267a4af5dab0cb0eb85ed9b61cdfcbc57dcc..38283363da06d40057ddfe71c3c8ec1a2c7a94d2 100644 (file)
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        unsigned long pfn, pgoff, order;
        pgprot_t pgprot = PAGE_KERNEL;
        int error, nid, is_ram;
+       struct dev_pagemap *conflict_pgmap;
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
+       align_end = align_start + align_size - 1;
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       align_end = align_start + align_size - 1;
 
        foreach_order_pgoff(res, order, pgoff) {
                error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL_GPL(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_enable;
 
 /*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
        } else if (!count)
                __put_page(page);
 }
-EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
index 87331565e5050a296aca3fec37df3dc229b3c0cc..70178f6ffdc4d387681a135a24b1a46ff34cfde2 100644 (file)
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
        /* Push all the CPUs into the idle loop. */
        wake_up_all_idle_cpus();
        /* Make the current CPU wait so it can enter the idle loop too. */
-       swait_event(s2idle_wait_head,
+       swait_event_exclusive(s2idle_wait_head,
                    s2idle_state == S2IDLE_STATE_WAKE);
 
        cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
        raw_spin_lock_irqsave(&s2idle_lock, flags);
        if (s2idle_state > S2IDLE_STATE_NONE) {
                s2idle_state = S2IDLE_STATE_WAKE;
-               swake_up(&s2idle_wait_head);
+               swake_up_one(&s2idle_wait_head);
        }
        raw_spin_unlock_irqrestore(&s2idle_lock, flags);
 }
index 40cea6735c2df564655155597d410035309850bd..4d04683c31b2b20d011a4988e9d0e114878798c5 100644 (file)
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
 }
 
-/* Take a snapshot of the update side's sequence number. */
+/*
+ * rcu_seq_snap - Take a snapshot of the update side's sequence number.
+ *
+ * This function returns the earliest value of the grace-period sequence number
+ * that will indicate that a full grace period has elapsed since the current
+ * time.  Once the grace-period sequence number has reached this value, it will
+ * be safe to invoke all callbacks that have been registered prior to the
+ * current time. This value is the current grace-period number plus two to the
+ * power of the number of low-order bits reserved for state, then rounded up to
+ * the next value in which the state bits are all zero.
+ */
 static inline unsigned long rcu_seq_snap(unsigned long *sp)
 {
        unsigned long s;
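The hunk cuts off before the body of rcu_seq_snap(); following the new
comment's description, the computation plausibly amounts to (a sketch,
where RCU_SEQ_STATE_MASK == (1 << RCU_SEQ_CTR_SHIFT) - 1):

        /*
         * Add one full grace period, then round up so that the state
         * bits of the result are all zero.
         */
        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) &
            ~RCU_SEQ_STATE_MASK;
        return s;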
@@ -107,6 +117,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
        return READ_ONCE(*sp);
 }
 
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not the
+ * corresponding update-side operation has started.
+ */
+static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
+{
+       return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
+}
+
 /*
  * Given a snapshot from rcu_seq_snap(), determine whether or not a
  * full update-side operation has occurred.
@@ -116,6 +135,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
 }
 
+/*
+ * Has a grace period completed since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
+}
+
+/*
+ * Has a grace period started since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
+                           new);
+}
+
+/*
+ * Roughly how many full grace periods have elapsed between the collection
+ * of the two specified gp_seq values?
+ */
+static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
+{
+       unsigned long rnd_diff;
+
+       if (old == new)
+               return 0;
+       /*
+        * Compute the number of grace periods (still shifted up), plus
+        * one if either new or old is not an exact grace period.
+        */
+       rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
+                  ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
+                  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
+       if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
+               return 1; /* Definitely no grace period has elapsed. */
+       return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
+}
+
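
Because the rounding in rcu_seq_diff() is subtle, here is a quick userspace check of the same arithmetic (again assuming RCU_SEQ_CTR_SHIFT of 2; the wrap-safe ULONG_CMP_GE() is simplified to a plain comparison, which is valid away from wraparound):

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Same arithmetic as rcu_seq_diff() above, minus the wrap-safe compares. */
static unsigned long seq_diff(unsigned long new, unsigned long old)
{
        unsigned long rnd_diff;

        if (old == new)
                return 0;
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (rnd_diff <= RCU_SEQ_STATE_MASK)
                return 1;       /* definitely no full GP elapsed */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

int main(void)
{
        printf("%lu\n", seq_diff(0x4, 0x0)); /* one idle-to-idle tick: prints 2 */
        printf("%lu\n", seq_diff(0x5, 0x1)); /* both mid-GP, same tick: prints 1 */
        printf("%lu\n", seq_diff(0xc, 0x0)); /* three idle-to-idle ticks: prints 4 */
        return 0;       /* "roughly" errs on the high side by design */
}
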
 /*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
+/* Is this rcu_node the last leaf? */
+#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+
 /*
  * Do a full breadth-first scan of the rcu_node structures for the
  * specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
+                           unsigned long *gp_seq);
 void rcutorture_record_progress(unsigned long vernum);
 void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
                               unsigned long c);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
-                                         int *flags,
-                                         unsigned long *gpnum,
-                                         unsigned long *completed)
+                                         int *flags, unsigned long *gp_seq)
 {
        *flags = 0;
-       *gpnum = 0;
-       *completed = 0;
+       *gp_seq = 0;
 }
-static inline void rcutorture_record_test_transition(void) { }
 static inline void rcutorture_record_progress(unsigned long vernum) { }
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 
 static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
-                                          unsigned long *gpnum,
-                                          unsigned long *completed)
+                                          unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = sp->srcu_idx;
-       *gpnum = *completed;
+       *gp_seq = sp->srcu_idx;
 }
 
 #elif defined(CONFIG_TREE_SRCU)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed);
+                            unsigned long *gp_seq);
 
 #endif
 
 #ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
 static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
 static inline void rcu_bh_force_quiescent_state(void) { }
 static inline void rcu_sched_force_quiescent_state(void) { }
 static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 #else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
index e232846516b3b5e8ca22c3ff1aa5b0baea5d5ba3..34244523550e16e498754debe9917b7506c1196f 100644 (file)
@@ -19,6 +19,9 @@
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
              "Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
 
 static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_perf_read_lock,
        .readunlock     = rcu_perf_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed,
        .async          = call_rcu,
        .gp_barrier     = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_bh_perf_read_lock,
        .readunlock     = rcu_bh_perf_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_bh,
        .gp_barrier     = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
        .cleanup        = srcu_sync_perf_cleanup,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = sched_perf_read_lock,
        .readunlock     = sched_perf_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_sched,
        .gp_barrier     = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = tasks_perf_read_lock,
        .readunlock     = tasks_perf_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
+       .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks,
        .gp_barrier     = rcu_barrier_tasks,
        .sync           = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
        .name           = "tasks"
 };
 
-static bool __maybe_unused torturing_tasks(void)
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
 {
-       return cur_ops == &tasks_ops;
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
 }
 
 /*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
-                       b_rcu_perf_writer_started =
-                               cur_ops->completed();
+                       b_rcu_perf_writer_started = cur_ops->get_gp_seq();
                }
        }
 
@@ -502,7 +506,7 @@ retry:
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
-                                               cur_ops->completed();
+                                               cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ retry:
        return 0;
 }
 
-static inline void
+static void
 rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
-                        b_rcu_perf_writer_finished -
-                        b_rcu_perf_writer_started);
+                        rcuperf_seq_diff(b_rcu_perf_writer_finished,
+                                         b_rcu_perf_writer_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
-               pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
-                        perf_type);
+               pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
                pr_alert("rcu-perf types:");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
-                       pr_alert(" %s", perf_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", perf_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
index 42fcb7f05fac27dc6785fab9955db4f3cc2a8efc..c596c6f1e45717af99da5139d3c5078782deafbe 100644 (file)
@@ -22,6 +22,9 @@
  *
  * See also:  Documentation/RCU/torture.txt
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -52,6 +55,7 @@
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/sysctl.h>
 
 #include "rcu.h"
 
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
+/* Bits for ->extendables field, extendables param, and related definitions. */
+#define RCUTORTURE_RDR_SHIFT    8      /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK     ((1 << RCUTORTURE_RDR_SHIFT) - 1)
+#define RCUTORTURE_RDR_BH       0x1    /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ      0x2    /*  ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT  0x4    /*  ... disabling preemption. */
+#define RCUTORTURE_RDR_RCU      0x8    /*  ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS    4      /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND   (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
+                                 RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_MAX_LOOPS 0x7   /* Maximum reader extensions. */
+                                       /* Must be power of two minus one. */
+
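
These new defines pack the entire reader state into one int: the index returned by ->readlock() sits above RCUTORTURE_RDR_SHIFT, the protection bits below it. A tiny userspace model of that packing:

#include <assert.h>
#include <stdio.h>

#define RCUTORTURE_RDR_SHIFT 8  /* SRCU index lives in the upper bits */
#define RCUTORTURE_RDR_MASK  ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH    0x1
#define RCUTORTURE_RDR_IRQ   0x2

int main(void)
{
        int srcu_idx = 1;       /* as returned by ->readlock() */
        int readstate = (srcu_idx << RCUTORTURE_RDR_SHIFT) |
                        RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ;

        /* Unpack the same way rcutorture_one_extend() does. */
        assert((readstate >> RCUTORTURE_RDR_SHIFT) == srcu_idx);
        assert((readstate & RCUTORTURE_RDR_MASK) ==
               (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ));
        printf("readstate = %#x\n", readstate); /* prints 0x103 */
        return 0;
}
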
 torture_param(int, cbflood_inter_holdoff, HZ,
              "Holdoff between floods (jiffies)");
 torture_param(int, cbflood_intra_holdoff, 1,
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
 torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
 torture_param(int, cbflood_n_per_burst, 20000,
              "# callbacks per burst in flood");
+torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
+             "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
 torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
             "Enable debug-object double call_rcu() testing");
 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 torture_param(int, onoff_interval, 0,
-            "Time between CPU hotplugs (s), 0=disable");
+            "Time between CPU hotplugs (jiffies), 0=disable");
 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
             "Interval between boost tests, seconds.");
 torture_param(bool, test_no_idle_hz, true,
             "Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
-static long n_barrier_successes;
+static long n_barrier_successes; /* did rcu_barrier test succeed? */
 static atomic_long_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
        void (*stats)(void);
        int irq_capable;
        int can_boost;
+       int extendables;
+       int ext_irq_conflict;
        const char *name;
 };
 
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
         * force_quiescent_state. */
 
        if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
-               started = cur_ops->completed();
+               started = cur_ops->get_gp_seq();
                ts = rcu_trace_clock_local();
                mdelay(longdelay_ms);
-               completed = cur_ops->completed();
+               completed = cur_ops->get_gp_seq();
                do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
                                          started, completed);
        }
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .exp_sync       = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
+       .ext_irq_conflict = RCUTORTURE_RDR_RCU,
        .name           = "rcu_bh"
 };
 
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
        .exp_sync       = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
        .name           = "srcud"
 };
 
+/* As above, but broken due to inappropriate reader extension. */
+static struct rcu_torture_ops busted_srcud_ops = {
+       .ttype          = SRCU_FLAVOR,
+       .init           = srcu_torture_init,
+       .cleanup        = srcu_torture_cleanup,
+       .readlock       = srcu_torture_read_lock,
+       .read_delay     = rcu_read_delay,
+       .readunlock     = srcu_torture_read_unlock,
+       .get_gp_seq     = srcu_torture_completed,
+       .deferred_free  = srcu_torture_deferred_free,
+       .sync           = srcu_torture_synchronize,
+       .exp_sync       = srcu_torture_synchronize_expedited,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
+       .stats          = srcu_torture_stats,
+       .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
+       .name           = "busted_srcud"
+};
+
 /*
  * Definitions for sched torture testing.
  */
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
        .name           = "sched"
 };
 
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
        .name           = "tasks"
 };
 
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
+}
+
 static bool __maybe_unused torturing_tasks(void)
 {
        return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
        smp_store_release(&rbip->inflight, 0);
 }
 
+static int old_rt_runtime = -1;
+
+static void rcu_torture_disable_rt_throttle(void)
+{
+       /*
+        * Disable RT throttling so that rcutorture's boost threads don't get
+        * throttled. Only possible if rcutorture is built-in; otherwise the
+        * user should manually do this by setting the sched_rt_period_us and
+        * sched_rt_runtime sysctls.
+        */
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
+               return;
+
+       old_rt_runtime = sysctl_sched_rt_runtime;
+       sysctl_sched_rt_runtime = -1;
+}
+
+static void rcu_torture_enable_rt_throttle(void)
+{
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
+               return;
+
+       sysctl_sched_rt_runtime = old_rt_runtime;
+       old_rt_runtime = -1;
+}
+
+static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+{
+       if (end - start > test_boost_duration * HZ - HZ / 2) {
+               VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+               n_rcu_torture_boost_failure++;
+
+               return true; /* failed */
+       }
+
+       return false; /* passed */
+}
+
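
In other words, the new helper flags a failure when the boost-test callback needed longer than the test interval, minus half a second of slack, to get through a grace period. A back-of-the-envelope model (HZ of 1000 and the 4-second test_boost_duration are assumptions here):

#include <stdio.h>

#define HZ 1000         /* assumed tick rate */

/* Mirror of rcu_torture_boost_failed()'s timing test. */
static int boost_failed(unsigned long start, unsigned long end, int duration)
{
        return end - start > duration * HZ - HZ / 2;
}

int main(void)
{
        /* With a 4s interval the threshold is 3500 jiffies. */
        printf("%d %d\n", boost_failed(0, 3499, 4), boost_failed(0, 3501, 4));
        return 0;       /* prints: 0 1 */
}
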
 static int rcu_torture_boost(void *arg)
 {
        unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
+               /* Track whether the test already failed in this interval. */
+               bool failed = false;
+
+               /* Increment n_rcu_torture_boosts once per boost-test */
+               while (!kthread_should_stop()) {
+                       if (mutex_trylock(&boost_mutex)) {
+                               n_rcu_torture_boosts++;
+                               mutex_unlock(&boost_mutex);
+                               break;
+                       }
+                       schedule_timeout_uninterruptible(1);
+               }
+               if (kthread_should_stop())
+                       goto checkwait;
+
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
                                /* RCU core before ->inflight = 1. */
                                smp_store_release(&rbi.inflight, 1);
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
-                               if (jiffies - call_rcu_time >
-                                        test_boost_duration * HZ - HZ / 2) {
-                                       VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
-                                       n_rcu_torture_boost_failure++;
-                               }
+                               /* Check if the boost test failed */
+                               failed = failed ||
+                                        rcu_torture_boost_failed(call_rcu_time,
+                                                                jiffies);
                                call_rcu_time = jiffies;
                        }
                        stutter_wait("rcu_torture_boost");
@@ -776,6 +875,14 @@ static int rcu_torture_boost(void *arg)
                                goto checkwait;
                }
 
+               /*
+                * If boost never happened, then inflight will always be 1; in
+                * that case the boost check never ran in the above loop, so
+                * do one more here.
+                */
+               if (!failed && smp_load_acquire(&rbi.inflight))
+                       rcu_torture_boost_failed(call_rcu_time, jiffies);
+
                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
-                               n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
                                break;
                        }
                }
-               rcutorture_record_progress(++rcu_torture_current_version);
+               rcu_torture_current_version++;
                /* Cycle through nesting levels of rcu_expedite_gp() calls. */
                if (can_expedite &&
                    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
 }
 
 /*
- * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array.  The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
+ * Do one extension of an RCU read-side critical section using the
+ * current reader state in readstate (set to zero for initial entry
+ * to extended critical section), set the new state as specified by
+ * newstate (set to zero for final exit from extended critical section),
+ * and random-number-generator state in trsp.  If this is neither the
+ * beginning nor the end of the critical section and if there was actually a
+ * change, do a ->read_delay().
  */
-static void rcu_torture_timer(struct timer_list *unused)
+static void rcutorture_one_extend(int *readstate, int newstate,
+                                 struct torture_random_state *trsp)
+{
+       int idxnew = -1;
+       int idxold = *readstate;
+       int statesnew = ~*readstate & newstate;
+       int statesold = *readstate & ~newstate;
+
+       WARN_ON_ONCE(idxold < 0);
+       WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+
+       /* First, put new protection in place to avoid critical-section gap. */
+       if (statesnew & RCUTORTURE_RDR_BH)
+               local_bh_disable();
+       if (statesnew & RCUTORTURE_RDR_IRQ)
+               local_irq_disable();
+       if (statesnew & RCUTORTURE_RDR_PREEMPT)
+               preempt_disable();
+       if (statesnew & RCUTORTURE_RDR_RCU)
+               idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+       /* Next, remove old protection, irq first due to bh conflict. */
+       if (statesold & RCUTORTURE_RDR_IRQ)
+               local_irq_enable();
+       if (statesold & RCUTORTURE_RDR_BH)
+               local_bh_enable();
+       if (statesold & RCUTORTURE_RDR_PREEMPT)
+               preempt_enable();
+       if (statesold & RCUTORTURE_RDR_RCU)
+               cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+
+       /* Delay if neither beginning nor end and there was a change. */
+       if ((statesnew || statesold) && *readstate && newstate)
+               cur_ops->read_delay(trsp);
+
+       /* Update the reader state. */
+       if (idxnew == -1)
+               idxnew = idxold & ~RCUTORTURE_RDR_MASK;
+       WARN_ON_ONCE(idxnew < 0);
+       WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
+       *readstate = idxnew | newstate;
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
+}
+
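
A detail worth noting in rcutorture_one_extend() above is the acquire-before-release ordering: the statesnew bits are taken before the statesold bits are dropped, so the critical section never has a gap. A small userspace model of the mask split, reusing the bit values from the defines earlier:

#include <stdio.h>

/* Model of the statesnew/statesold split in rcutorture_one_extend():
 * bits being acquired versus bits being released on one transition. */
int main(void)
{
        int readstate = 0x1 | 0x8;      /* currently BH + RCU */
        int newstate  = 0x2 | 0x8;      /* want IRQ + RCU */
        int statesnew = ~readstate & newstate;  /* acquired first... */
        int statesold = readstate & ~newstate;  /* ...then released */

        printf("acquire %#x, release %#x\n", statesnew, statesold);
        return 0;       /* prints: acquire 0x2, release 0x1 */
}
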
+/* Return the biggest extendables mask given current RCU and boot parameters. */
+static int rcutorture_extend_mask_max(void)
+{
+       int mask;
+
+       WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
+       mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
+       mask = mask | RCUTORTURE_RDR_RCU;
+       return mask;
+}
+
+/* Return a random protection state mask, but with at least one bit set. */
+static int
+rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+{
+       int mask = rcutorture_extend_mask_max();
+       unsigned long randmask1 = torture_random(trsp) >> 8;
+       unsigned long randmask2 = randmask1 >> 1;
+
+       WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+       /* Half the time lots of bits, half the time only one bit. */
+       if (randmask1 & 0x1)
+               mask = mask & randmask2;
+       else
+               mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & RCUTORTURE_RDR_BH) &&
+           (oldmask & RCUTORTURE_RDR_BH))
+               mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & cur_ops->ext_irq_conflict) &&
+           (oldmask & cur_ops->ext_irq_conflict))
+               mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
+       return mask ?: RCUTORTURE_RDR_RCU;
+}
+
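
The selection policy in rcutorture_extend_mask() can also be modeled in userspace to see the distribution it produces. The sketch below keeps only the shape of the logic: random() stands in for torture_random(), and the bit values match the earlier defines.

#include <stdio.h>
#include <stdlib.h>

#define RDR_NBITS 4
#define RDR_RCU   0x8

/* Shape of rcutorture_extend_mask(): half the time a random subset of
 * the allowed bits, half the time exactly one bit, never an empty mask. */
static int pick_mask(int allowed)
{
        unsigned long r = random() >> 3;
        int mask;

        if (r & 0x1)
                mask = allowed & (r >> 1);                      /* lots of bits */
        else
                mask = allowed & (1 << ((r >> 1) % RDR_NBITS)); /* one bit */
        return mask ? mask : RDR_RCU;
}

int main(void)
{
        int hist[1 << RDR_NBITS] = { 0 };
        int i;

        srandom(1);
        for (i = 0; i < 100000; i++)
                hist[pick_mask((1 << RDR_NBITS) - 1)]++;
        for (i = 0; i < 1 << RDR_NBITS; i++)
                if (hist[i])
                        printf("mask %#x: %d\n", i, hist[i]);
        return 0;
}
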
+/*
+ * Do a randomly selected number of extensions of an existing RCU read-side
+ * critical section.
+ */
+static void rcutorture_loop_extend(int *readstate,
+                                  struct torture_random_state *trsp)
+{
+       int i;
+       int mask = rcutorture_extend_mask_max();
+
+       WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
+       if (!((mask - 1) & mask))
+               return;  /* Current RCU flavor not extendable. */
+       i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
+       while (i--) {
+               mask = rcutorture_extend_mask(*readstate, trsp);
+               rcutorture_one_extend(readstate, mask, trsp);
+       }
+}
+
+/*
+ * Do one read-side critical section, returning false if there was
+ * no data to read.  Can be invoked both from process context and
+ * from a timer handler.
+ */
+static bool rcu_torture_one_read(struct torture_random_state *trsp)
 {
-       int idx;
        unsigned long started;
        unsigned long completed;
-       static DEFINE_TORTURE_RANDOM(rand);
-       static DEFINE_SPINLOCK(rand_lock);
+       int newstate;
        struct rcu_torture *p;
        int pipe_count;
+       int readstate = 0;
        unsigned long long ts;
 
-       idx = cur_ops->readlock();
-       if (cur_ops->started)
-               started = cur_ops->started();
-       else
-               started = cur_ops->completed();
+       newstate = rcutorture_extend_mask(readstate, trsp);
+       rcutorture_one_extend(&readstate, newstate, trsp);
+       started = cur_ops->get_gp_seq();
        ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
                                  srcu_read_lock_held(srcu_ctlp) ||
                                  torturing_tasks());
        if (p == NULL) {
-               /* Leave because rcu_torture_writer is not yet underway */
-               cur_ops->readunlock(idx);
-               return;
+               /* Wait for rcu_torture_writer to get underway */
+               rcutorture_one_extend(&readstate, 0, trsp);
+               return false;
        }
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
-       spin_lock(&rand_lock);
-       cur_ops->read_delay(&rand);
-       n_rcu_torture_timers++;
-       spin_unlock(&rand_lock);
+       rcutorture_loop_extend(&readstate, trsp);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       completed = cur_ops->completed();
+       completed = cur_ops->get_gp_seq();
        if (pipe_count > 1) {
-               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-                                         started, completed);
+               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+                                         ts, started, completed);
                rcu_ftrace_dump(DUMP_ALL);
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = completed - started;
-       if (cur_ops->started)
-               completed++;
+       completed = rcutorture_seq_diff(completed, started);
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_batch[completed]);
        preempt_enable();
-       cur_ops->readunlock(idx);
+       rcutorture_one_extend(&readstate, 0, trsp);
+       WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+       return true;
+}
+
+static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
+
+/*
+ * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array.  The
+ * counter in the element should never be greater than 1; otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(struct timer_list *unused)
+{
+       atomic_long_inc(&n_rcu_torture_timers);
+       (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
 
        /* Test call_rcu() invocation from interrupt handler. */
        if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
 static int
 rcu_torture_reader(void *arg)
 {
-       unsigned long started;
-       unsigned long completed;
-       int idx;
        DEFINE_TORTURE_RANDOM(rand);
-       struct rcu_torture *p;
-       int pipe_count;
        struct timer_list t;
-       unsigned long long ts;
 
        VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
        set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
                        if (!timer_pending(&t))
                                mod_timer(&t, jiffies + 1);
                }
-               idx = cur_ops->readlock();
-               if (cur_ops->started)
-                       started = cur_ops->started();
-               else
-                       started = cur_ops->completed();
-               ts = rcu_trace_clock_local();
-               p = rcu_dereference_check(rcu_torture_current,
-                                         rcu_read_lock_bh_held() ||
-                                         rcu_read_lock_sched_held() ||
-                                         srcu_read_lock_held(srcu_ctlp) ||
-                                         torturing_tasks());
-               if (p == NULL) {
-                       /* Wait for rcu_torture_writer to get underway */
-                       cur_ops->readunlock(idx);
+               if (!rcu_torture_one_read(&rand))
                        schedule_timeout_interruptible(HZ);
-                       continue;
-               }
-               if (p->rtort_mbtest == 0)
-                       atomic_inc(&n_rcu_torture_mberror);
-               cur_ops->read_delay(&rand);
-               preempt_disable();
-               pipe_count = p->rtort_pipe_count;
-               if (pipe_count > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       pipe_count = RCU_TORTURE_PIPE_LEN;
-               }
-               completed = cur_ops->completed();
-               if (pipe_count > 1) {
-                       do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-                                                 ts, started, completed);
-                       rcu_ftrace_dump(DUMP_ALL);
-               }
-               __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = completed - started;
-               if (cur_ops->started)
-                       completed++;
-               if (completed > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       completed = RCU_TORTURE_PIPE_LEN;
-               }
-               __this_cpu_inc(rcu_torture_batch[completed]);
-               preempt_enable();
-               cur_ops->readunlock(idx);
                stutter_wait("rcu_torture_reader");
        } while (!torture_must_stop());
        if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
        pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
                n_rcu_torture_boost_failure,
                n_rcu_torture_boosts,
-               n_rcu_torture_timers);
+               atomic_long_read(&n_rcu_torture_timers));
        torture_onoff_stats();
        pr_cont("barrier: %ld/%ld:%ld ",
                n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
        if (rtcv_snap == rcu_torture_current_version &&
            rcu_torture_current != NULL) {
                int __maybe_unused flags = 0;
-               unsigned long __maybe_unused gpnum = 0;
-               unsigned long __maybe_unused completed = 0;
+               unsigned long __maybe_unused gp_seq = 0;
 
                rcutorture_get_gp_data(cur_ops->ttype,
-                                      &flags, &gpnum, &completed);
+                                      &flags, &gp_seq);
                srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                                       &flags, &gpnum, &completed);
+                                       &flags, &gp_seq);
                wtp = READ_ONCE(writer_task);
-               pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
+               pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
                         rcu_torture_writer_state_getname(),
-                        rcu_torture_writer_state,
-                        gpnum, completed, flags,
+                        rcu_torture_writer_state, gp_seq, flags,
                         wtp == NULL ? ~0UL : wtp->state,
                         wtp == NULL ? -1 : (int)task_cpu(wtp));
                if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
        return 0;
 }
 
-static inline void
+static void
 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
        mutex_lock(&boost_mutex);
        t = boost_tasks[cpu];
        boost_tasks[cpu] = NULL;
+       rcu_torture_enable_rt_throttle();
        mutex_unlock(&boost_mutex);
 
        /* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
 
        /* Don't allow time recalculation while creating a new task. */
        mutex_lock(&boost_mutex);
+       rcu_torture_disable_rt_throttle();
        VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
        boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
                                                  cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
                VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
        }
        if (!kthread_should_stop()) {
-               stop_at = get_seconds() + stall_cpu;
+               stop_at = ktime_get_seconds() + stall_cpu;
                /* RCU CPU stall is expected behavior in following code. */
                rcu_read_lock();
                if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
                        preempt_disable();
                pr_alert("rcu_torture_stall start on CPU %d.\n",
                         smp_processor_id());
-               while (ULONG_CMP_LT(get_seconds(), stop_at))
+               while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+                                   stop_at))
                        continue;  /* Induce RCU CPU stall warning. */
                if (stall_cpu_irqsoff)
                        local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
                               atomic_read(&barrier_cbs_invoked),
                               n_barrier_cbs);
                        WARN_ON_ONCE(1);
+               } else {
+                       n_barrier_successes++;
                }
-               n_barrier_successes++;
                schedule_timeout_interruptible(HZ / 10);
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
        }
 }
 
+static bool rcu_torture_can_boost(void)
+{
+       static int boost_warn_once;
+       int prio;
+
+       if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+               return false;
+
+       prio = rcu_get_gp_kthreads_prio();
+       if (!prio)
+               return false;
+
+       if (prio < 2) {
+               if (boost_warn_once == 1)
+                       return false;
+
+               pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+               boost_warn_once = 1;
+               return false;
+       }
+
+       return true;
+}
+
 static enum cpuhp_state rcutor_hp;
 
 static void
 rcu_torture_cleanup(void)
 {
        int flags = 0;
-       unsigned long gpnum = 0;
-       unsigned long completed = 0;
+       unsigned long gp_seq = 0;
        int i;
 
-       rcutorture_record_test_transition();
        if (torture_cleanup_begin()) {
                if (cur_ops->cb_barrier != NULL)
                        cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
                fakewriter_tasks = NULL;
        }
 
-       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed);
-       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                               &flags, &gpnum, &completed);
-       pr_alert("%s:  End-test grace-period state: g%lu c%lu f%#x\n",
-                cur_ops->name, gpnum, completed, flags);
+       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+       pr_alert("%s:  End-test grace-period state: g%lu f%#x\n",
+                cur_ops->name, gp_seq, flags);
        torture_stop_kthread(rcu_torture_stats, stats_task);
        torture_stop_kthread(rcu_torture_fqs, fqs_task);
        for (i = 0; i < ncbflooders; i++)
                torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2)
+       if (rcu_torture_can_boost())
                cpuhp_remove_state(rcutor_hp);
 
        /*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] = {
                &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
-               &sched_ops, &tasks_ops,
+               &busted_srcud_ops, &sched_ops, &tasks_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
                         torture_type);
                pr_alert("rcu-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
-                       pr_alert(" %s", torture_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", torture_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2) {
+       if (rcu_torture_can_boost()) {
 
                boost_starttime = jiffies + test_boost_interval * HZ;
 
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
        firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
        if (firsterr)
                goto unwind;
-       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
                                goto unwind;
                }
        }
-       rcutorture_record_test_transition();
        torture_init_end();
        return 0;
 
index 622792abe41a244643dd5ce4f10f48930ff7a5c8..04fc2ed71af8e9d62dae4b1a74499d1da0b25bed 100644 (file)
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 
        WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
        if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-               swake_up(&sp->srcu_wq);
+               swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
        idx = sp->srcu_idx;
        WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
        WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-       swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+       swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
        WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 
        /* Invoke the callbacks we removed above. */
index b4123d7a2cec4f9010178d9b0049c2e43bbf833b..6c9866a854b111b2ee5245ef2b6ca18af994b4c5 100644 (file)
@@ -26,6 +26,8 @@
  *
  */
 
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
                }
        if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(sp))) {
-               pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+               pr_info("%s: Active srcu_struct %p state: %d\n",
+                       __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
  * period s.  Losers must either ensure that their desired grace-period
  * number is recorded on at least their leaf srcu_node structure, or they
  * must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
  */
 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                                 unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * more than one CPU, this means that when "func()" is invoked, each CPU
  * is guaranteed to have executed a full memory barrier since the end of
  * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu().  It also means that each CPU executing
+ * preceded the call to call_srcu().  It also means that each CPU executing
  * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
  * but before the beginning of that SRCU read-side critical section.
  * Note that these guarantees include CPUs that are offline, idle, or
  * executing in user mode, as well as CPUs that are executing in the kernel.
  *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
  * resulting SRCU callback function "func()", then both CPU A and CPU
  * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
  * This guarantee applies even if CPU A and CPU B are the same CPU (but
  * again only if the system has more than one CPU).
  *
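
As a usage illustration of the guarantee being documented here, consider deferring a kfree() until all current SRCU readers are done. This is a hypothetical sketch, not code from this patch; struct foo, my_srcu, foo_read(), and foo_retire() are invented names:

#include <linux/slab.h>
#include <linux/srcu.h>

struct foo {
        struct rcu_head rh;
        int data;
};

DEFINE_SRCU(my_srcu);

static int foo_read(struct foo *fp)
{
        int idx, d;

        idx = srcu_read_lock(&my_srcu);
        d = READ_ONCE(fp->data);        /* fp cannot be freed while we are here */
        srcu_read_unlock(&my_srcu, idx);
        return d;
}

static void foo_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rh));
}

/* foo_free_cb() runs only after a full SRCU grace period; per the text
 * above, the needed memory barriers are implied on every CPU. */
static void foo_retire(struct foo *fp)
{
        call_srcu(&my_srcu, &fp->rh, foo_free_cb);
}
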
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed)
+                            unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = rcu_seq_ctr(sp->srcu_gp_seq);
-       *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+       *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
 
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
        unsigned long s0 = 0, s1 = 0;
 
        idx = sp->srcu_idx & 0x1;
-       pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
+       pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
+                tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
        for_each_possible_cpu(cpu) {
                unsigned long l0, l1;
                unsigned long u0, u1;
                long c0, c1;
-               struct srcu_data *counts;
+               struct srcu_data *sdp;
 
-               counts = per_cpu_ptr(sp->sda, cpu);
-               u0 = counts->srcu_unlock_count[!idx];
-               u1 = counts->srcu_unlock_count[idx];
+               sdp = per_cpu_ptr(sp->sda, cpu);
+               u0 = sdp->srcu_unlock_count[!idx];
+               u1 = sdp->srcu_unlock_count[idx];
 
                /*
                 * Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
                 */
                smp_rmb();
 
-               l0 = counts->srcu_lock_count[!idx];
-               l1 = counts->srcu_lock_count[idx];
+               l0 = sdp->srcu_lock_count[!idx];
+               l1 = sdp->srcu_lock_count[idx];
 
                c0 = l0 - u0;
                c1 = l1 - u1;
-               pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+               pr_cont(" %d(%ld,%ld %1p)",
+                       cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
                s0 += c0;
                s1 += c1;
        }
index a64eee0db39e3642c33e69c55535619e363c0b55..befc9321a89c22cfbf34d3e8a34e56685ff2801a 100644 (file)
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
 {
        if (user)
                rcu_sched_qs();
-       else if (!in_softirq())
+       if (user || !in_softirq())
                rcu_bh_qs();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
 }
 
 /*
index aa7cade1b9f399abcac792e08a97114305a241ad..0b760c1369f76e758b44b68c2d3384782cdacdcc 100644 (file)
@@ -27,6 +27,9 @@
  * For detailed explanation of Read-Copy Update mechanism see -
  *     Documentation/RCU
  */
+
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
        .rda = &sname##_data, \
        .call = cr, \
        .gp_state = RCU_GP_IDLE, \
-       .gpnum = 0UL - 300UL, \
-       .completed = 0UL - 300UL, \
+       .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+       .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+                 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+/* Retrieve RCU kthreads priority for rcutorture. */
+int rcu_get_gp_kthreads_prio(void)
+{
+       return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay.  The longer the delay, the more the grace periods between
@@ -188,18 +201,6 @@ module_param(gp_cleanup_delay, int, 0444);
  */
 #define PER_RCU_NODE_PERIOD 3  /* Number of grace periods between delays. */
 
-/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test.  The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running.  The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
 /*
  * Compute the mask of online CPUs for the specified rcu_node structure.
  * This will not be stable unless the rcu_node structure's ->lock is
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-       return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+       return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
 }
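
The new test relies on ->gp_seq carrying both a grace-period counter and a phase: the low-order state bits are nonzero exactly while a grace period is in progress. A minimal userspace sketch of that encoding, assuming a two-bit state field; the helpers echo the kernel's rcu_seq_*() functions but are simplified models, not the kernel code:

    #include <assert.h>

    #define RCU_SEQ_CTR_SHIFT       2
    #define RCU_SEQ_STATE_MASK      ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* Upper bits: completed-GP counter.  Low bits: in-progress state. */
    static unsigned long seq_ctr(unsigned long s)   { return s >> RCU_SEQ_CTR_SHIFT; }
    static unsigned long seq_state(unsigned long s) { return s & RCU_SEQ_STATE_MASK; }

    static void seq_start(unsigned long *sp) { *sp += 1; }  /* GP begins */
    static void seq_end(unsigned long *sp)                  /* GP ends */
    {
            *sp = (*sp | RCU_SEQ_STATE_MASK) + 1;  /* clear state, bump counter */
    }

    int main(void)
    {
            unsigned long gp_seq = 0;

            assert(!seq_state(gp_seq));             /* idle: no GP in progress */
            seq_start(&gp_seq);
            assert(seq_state(gp_seq));              /* GP now in progress */
            seq_end(&gp_seq);
            assert(!seq_state(gp_seq) && seq_ctr(gp_seq) == 1);
            return 0;
    }

This is also why the state initializers above shift the starting value by RCU_SEQ_CTR_SHIFT: the counter lives above the state bits.
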
 
 /*
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
                return;
        trace_rcu_grace_period(TPS("rcu_sched"),
-                              __this_cpu_read(rcu_sched_data.gpnum),
+                              __this_cpu_read(rcu_sched_data.gp_seq),
                               TPS("cpuqs"));
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_bh"),
-                                      __this_cpu_read(rcu_bh_data.gpnum),
+                                      __this_cpu_read(rcu_bh_data.gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
        }
@@ -379,20 +380,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
        return snap != rcu_dynticks_snap(rdtp);
 }
 
-/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-       int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                       &rdtp->dynticks);
-
-       /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
 /*
  * Set the special (bottom) bit of the specified CPU so that it
  * will take special action (such as flushing its TLB) on the
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
  *
  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
  *
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
  */
 static void rcu_momentary_dyntick_idle(void)
 {
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
-       rcu_dynticks_momentary_idle();
+       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+       /* It is illegal to call this from idle state. */
+       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 }
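
The merged-in body keeps the old double-increment trick: bumping ->dynticks by a full idle-enter/idle-exit period makes any previously taken snapshot compare unequal, which is all rcu_dynticks_in_eqs_since() needs in order to credit a quiescent state. A simplified userspace model, assuming only that even counter values mean idle; this is not the kernel's RCU_DYNTICK_CTRL encoding:

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_ulong dynticks;   /* even => idle, odd => active */

    static unsigned long snap(void) { return atomic_load(&dynticks); }

    static int in_eqs_since(unsigned long s)
    {
            /* Idle was visited if the snapshot was even or the counter moved. */
            return !(s & 1) || snap() != s;
    }

    int main(void)
    {
            unsigned long s;

            atomic_store(&dynticks, 1);     /* CPU is active */
            s = snap();
            assert(!in_eqs_since(s));       /* no quiescent state yet */
            atomic_fetch_add(&dynticks, 2); /* momentary idle: enter + exit */
            assert(in_eqs_since(s));        /* watchers now see a QS */
            return 0;
    }
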
 
 /*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        if (!preempt)
-               rcu_note_voluntary_context_switch_lite(current);
+               rcu_tasks_qs(current);
 out:
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+       return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+       return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+       .set = param_set_first_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+       .set = param_set_next_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 module_param(rcu_kick_kthreads, bool, 0644);
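
The two kernel_param_ops above exist only to clamp user-supplied values at set time (to at most HZ, and to at least 1 for the next-FQS case). The same pattern reduced to a standalone sketch; the module and its demo_jiffies parameter are hypothetical, invented for illustration:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static ulong demo_jiffies;

    static int demo_set(const char *val, const struct kernel_param *kp)
    {
            ulong j;
            int ret = kstrtoul(val, 0, &j);

            if (!ret)       /* clamp writes such as "echo 99999 > .../demo_jiffies" */
                    WRITE_ONCE(*(ulong *)kp->arg, min(j, (ulong)HZ));
            return ret;
    }

    static const struct kernel_param_ops demo_ops = {
            .set = demo_set,
            .get = param_get_ulong,
    };
    module_param_cb(demo_jiffies, &demo_ops, &demo_jiffies, 0644);
    MODULE_LICENSE("GPL");

Clamping in the setter is what lets the grace-period kthread loop later in this patch drop its old open-coded "if (j > HZ)" fixup.
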
 
 /*
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
 {
-       return rcu_state_p->gpnum;
+       return READ_ONCE(rcu_state_p->gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 
 /*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
 {
-       return rcu_sched_state.gpnum;
+       return READ_ONCE(rcu_sched_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
 /*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
 {
-       return rcu_bh_state.gpnum;
+       return READ_ONCE(rcu_bh_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-       return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
-       return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
-       return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
 
 /*
  * Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  */
 void show_rcu_gp_kthreads(void)
 {
+       int cpu;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp) {
                pr_info("%s: wait state: %d ->state: %#lx\n",
                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+               rcu_for_each_node_breadth_first(rsp, rnp) {
+                       if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+                               continue;
+                       pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+                               rnp->grplo, rnp->grphi, rnp->gp_seq,
+                               rnp->gp_seq_needed);
+                       if (!rcu_is_leaf_node(rnp))
+                               continue;
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               if (rdp->gpwrap ||
+                                   ULONG_CMP_GE(rsp->gp_seq,
+                                                rdp->gp_seq_needed))
+                                       continue;
+                               pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+                                       cpu, rdp->gp_seq_needed);
+                       }
+               }
                /* sched_show_task(rsp->gp_kthread); */
        }
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
-/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated.  This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded.  In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
-       rcutorture_testseq++;
-       rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed)
+                           unsigned long *gp_seq)
 {
        struct rcu_state *rsp = NULL;
 
@@ -684,22 +686,10 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
        if (rsp == NULL)
                return;
        *flags = READ_ONCE(rsp->gp_flags);
-       *gpnum = READ_ONCE(rsp->gpnum);
-       *completed = READ_ONCE(rsp->completed);
+       *gp_seq = rcu_seq_current(&rsp->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
-       rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
 /*
  * Return the root node of the specified rcu_state structure.
  */
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
- * Is the current CPU online?  Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active.  Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask.  This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
  *
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline.  Because there are
+ * multiple flavors of RCU, and because this function can be called in the
+ * midst of updating the flavors while a given CPU is coming online or
+ * going offline, it is necessary to check all flavors.  If any of the
+ * flavors believes that the given CPU is online, it is considered online.
  *
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway.  In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
  */
 bool rcu_lockdep_current_cpu_online(void)
 {
        struct rcu_data *rdp;
        struct rcu_node *rnp;
-       bool ret;
+       struct rcu_state *rsp;
 
-       if (in_nmi())
+       if (in_nmi() || !rcu_scheduler_fully_active)
                return true;
        preempt_disable();
-       rdp = this_cpu_ptr(&rcu_sched_data);
-       rnp = rdp->mynode;
-       ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
-             !rcu_scheduler_fully_active;
+       for_each_rcu_flavor(rsp) {
+               rdp = this_cpu_ptr(rsp->rda);
+               rnp = rdp->mynode;
+               if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+                       preempt_enable();
+                       return true;
+               }
+       }
        preempt_enable();
-       return ret;
+       return false;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 /*
  * We are reporting a quiescent state on behalf of some other CPU, so
  * it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
  * After all, the CPU might be in deep idle state, and thus executing no
  * code whatsoever.
  */
 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 {
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+       if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+                        rnp->gp_seq))
                WRITE_ONCE(rdp->gpwrap, true);
-       if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
-               rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+       if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+               rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 }
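
The guard works because the kernel's unsigned-wraparound comparisons treat counters as points on a circle. A userspace restatement, with the macro body mirroring the kernel's ULONG_CMP_LT() definition:

    #include <assert.h>
    #include <limits.h>

    /* a is "before" b iff the circular distance a - b exceeds half the range. */
    #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (unsigned long)((a) - (b)))

    int main(void)
    {
            unsigned long old = ULONG_MAX - 1;      /* about to wrap */
            unsigned long new = old + 4;            /* wrapped past zero */

            assert(ULONG_CMP_LT(old, new));         /* ordering survives the wrap */
            assert(!ULONG_CMP_LT(new, old));
            return 0;
    }
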
 
 /*
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gpnum = rnp->gpnum;
+               rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         * of the current RCU grace period.
         */
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-           READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+           rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                smp_store_release(ruqp, true);
        }
 
-       /* Check for the CPU being offline. */
-       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
-               rdp->offline_fqs++;
-               rcu_gpnum_ovf(rnp, rdp);
-               return 1;
+       /* If waiting too long on an offline CPU, complain. */
+       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+           time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+               bool onl;
+               struct rcu_node *rnp1;
+
+               WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
+               pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+                       __func__, rnp->grplo, rnp->grphi, rnp->level,
+                       (long)rnp->gp_seq, (long)rnp->completedqs);
+               for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+                       pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+                               __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       __func__, rdp->cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+               return 1; /* Break things loose after complaining. */
        }
 
        /*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
-                   !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                   !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                        rdp->rcu_iw_pending = true;
-                       rdp->rcu_iw_gpnum = rnp->gpnum;
+                       rdp->rcu_iw_gp_seq = rnp->gp_seq;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
                }
        }
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        unsigned long j1;
 
        rsp->gp_start = j;
-       smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       WRITE_ONCE(rsp->jiffies_stall, j + j1);
+       /* Record ->gp_start before ->jiffies_stall. */
+       smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
        rsp->jiffies_resched = j + j1 / 2;
        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
        j = jiffies;
        gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ) {
-               pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rsp->name, j - gpa,
-                      rsp->gpnum, rsp->completed,
+                      (long)rcu_seq_current(&rsp->gp_seq),
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
        }
 }
 
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
 {
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 {
        int cpu;
-       long delta;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        if (rcu_cpu_stall_suppress)
                return;
 
-       /* Only let one CPU complain about others per time interval. */
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       delta = jiffies - READ_ONCE(rsp->jiffies_stall);
-       if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       WRITE_ONCE(rsp->jiffies_stall,
-                  jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:",
-              rsp->name);
+       pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
-              (long)rsp->gpnum, (long)rsp->completed, totqlen);
+              (long)rcu_seq_current(&rsp->gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
 
                /* Complain about tasks blocking the grace period. */
                rcu_print_detail_task_stall(rsp);
        } else {
-               if (READ_ONCE(rsp->gpnum) != gpnum ||
-                   READ_ONCE(rsp->completed) == gpnum) {
+               if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                        sched_show_task(current);
                }
        }
+       /* Rewrite if needed in case of slow consoles. */
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+               WRITE_ONCE(rsp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
        rcu_check_gp_kthread_starvation(rsp);
 
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+       pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
-               (long)rsp->gpnum, (long)rsp->completed, totqlen);
+               (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
        rcu_check_gp_kthread_starvation(rsp);
 
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       unsigned long completed;
-       unsigned long gpnum;
+       unsigned long gs1;
+       unsigned long gs2;
        unsigned long gps;
        unsigned long j;
+       unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;
 
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        /*
         * Lots of memory barriers to reject false positives.
         *
-        * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
-        * then rsp->gp_start, and finally rsp->completed.  These values
-        * are updated in the opposite order with memory barriers (or
-        * equivalent) during grace-period initialization and cleanup.
-        * Now, a false positive can occur if we get an new value of
-        * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
-        * the memory barriers, the only way that this can happen is if one
-        * grace period ends and another starts between these two fetches.
-        * Detect this by comparing rsp->completed with the previous fetch
-        * from rsp->gpnum.
+        * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+        * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+        * These values are updated in the opposite order with memory
+        * barriers (or equivalent) during grace-period initialization
+        * and cleanup.  Now, a false positive can occur if we get a new
+        * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+        * But given the memory barriers, the only way that this can happen
+        * is if one grace period ends and another starts between these
+        * two fetches.  This is detected by comparing the second fetch
+        * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
         *
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
-       gpnum = READ_ONCE(rsp->gpnum);
-       smp_rmb(); /* Pick up ->gpnum first... */
+       gs1 = READ_ONCE(rsp->gp_seq);
+       smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rsp->gp_start);
-       smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-       completed = READ_ONCE(rsp->completed);
-       if (ULONG_CMP_GE(completed, gpnum) ||
+       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+       gs2 = READ_ONCE(rsp->gp_seq);
+       if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
+       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress(rsp) &&
-           (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+           cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
 
        } else if (rcu_gp_in_progress(rsp) &&
-                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+                  cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(rsp, gpnum);
+               print_other_cpu_stall(rsp, gs2);
        }
 }
 
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
                WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period.  This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
-                                      struct rcu_node *rnp)
-{
-       raw_lockdep_assert_held_rcu_node(rnp);
-
-       /*
-        * If RCU is idle, we just wait for the next grace period.
-        * But we can only be sure that RCU is idle if we are looking
-        * at the root rcu_node structure -- otherwise, a new grace
-        * period might have started, but just not yet gotten around
-        * to initializing the current non-root rcu_node structure.
-        */
-       if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
-               return rnp->completed + 1;
-
-       /*
-        * If the current rcu_node structure believes that RCU is
-        * idle, and if the rcu_state structure does not yet reflect
-        * the start of a new grace period, then the next grace period
-        * will suffice.  The memory barrier is needed to accurately
-        * sample the rsp->gpnum, and pairs with the second lock
-        * acquisition in rcu_gp_init(), which is augmented with
-        * smp_mb__after_unlock_lock() for this purpose.
-        */
-       if (rnp->gpnum == rnp->completed) {
-               smp_mb(); /* See above block comment. */
-               if (READ_ONCE(rsp->gpnum) == rnp->completed)
-                       return rnp->completed + 1;
-       }
-
-       /*
-        * Otherwise, wait for a possible partial grace period and
-        * then the subsequent full grace period.
-        */
-       return rnp->completed + 2;
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c, const char *s)
+                             unsigned long gp_seq_req, const char *s)
 {
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, s);
+       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+                                     rnp->level, rnp->grplo, rnp->grphi, s);
 }
 
 /*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
  * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field.  Returns true if there
+ * rcu_node structure's ->gp_seq_needed field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP kthread needs to be awakened, else false.
  */
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+                             unsigned long gp_seq_req)
 {
        bool ret = false;
        struct rcu_state *rsp = rdp->rsp;
-       struct rcu_node *rnp_root;
+       struct rcu_node *rnp;
 
        /*
         * Use funnel locking to either acquire the root rcu_node
         * structure's lock or bail out if the need for this grace period
-        * has already been recorded -- or has already started.  If there
-        * is already a grace period in progress in a non-leaf node, no
-        * recording is needed because the end of the grace period will
-        * scan the leaf rcu_node structures.  Note that rnp->lock must
-        * not be released.
+        * has already been recorded -- or if that grace period has in
+        * fact already started.  If there is already a grace period in
+        * progress in a non-leaf node, no recording is needed because the
+        * end of the grace period will scan the leaf rcu_node structures.
+        * Note that rnp_start->lock must not be released.
         */
-       raw_lockdep_assert_held_rcu_node(rnp);
-       trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
-       for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
-               if (rnp_root != rnp)
-                       raw_spin_lock_rcu_node(rnp_root);
-               WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
-                                         need_future_gp_mask(), c));
-               if (need_future_gp_element(rnp_root, c) ||
-                   ULONG_CMP_GE(rnp_root->gpnum, c) ||
-                   (rnp != rnp_root &&
-                    rnp_root->gpnum != rnp_root->completed)) {
-                       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+       raw_lockdep_assert_held_rcu_node(rnp_start);
+       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+       for (rnp = rnp_start; 1; rnp = rnp->parent) {
+               if (rnp != rnp_start)
+                       raw_spin_lock_rcu_node(rnp);
+               if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+                   rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+                   (rnp != rnp_start &&
+                    rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+                       trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+                                         TPS("Prestarted"));
                        goto unlock_out;
                }
-               need_future_gp_element(rnp_root, c) = true;
-               if (rnp_root != rnp && rnp_root->parent != NULL)
-                       raw_spin_unlock_rcu_node(rnp_root);
-               if (!rnp_root->parent)
+               rnp->gp_seq_needed = gp_seq_req;
+               if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+                       /*
+                        * We just marked the leaf or internal node, and a
+                        * grace period is in progress, which means that
+                        * rcu_gp_cleanup() will see the marking.  Bail to
+                        * reduce contention.
+                        */
+                       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+                                         TPS("Startedleaf"));
+                       goto unlock_out;
+               }
+               if (rnp != rnp_start && rnp->parent != NULL)
+                       raw_spin_unlock_rcu_node(rnp);
+               if (!rnp->parent)
                        break;  /* At root, and perhaps also leaf. */
        }
 
        /* If GP already in progress, just leave, otherwise start one. */
-       if (rnp_root->gpnum != rnp_root->completed) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+       if (rcu_gp_in_progress(rsp)) {
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
                goto unlock_out;
        }
-       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+       trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+       rsp->gp_req_activity = jiffies;
        if (!rsp->gp_kthread) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
                goto unlock_out;
        }
-       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
-       if (rnp != rnp_root)
-               raw_spin_unlock_rcu_node(rnp_root);
+       /* Push furthest requested GP to leaf node and rcu_data structure. */
+       if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+               rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       }
+       if (rnp != rnp_start)
+               raw_spin_unlock_rcu_node(rnp);
        return ret;
 }
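
A stripped-down model of the funnel walk may help: lock each ancestor in turn, bail as soon as the request is already recorded, and only a caller that reaches the root with an unrecorded request starts a grace period. The node type is hypothetical, pthread mutexes stand in for the rcu_node locks, and wrap-safe comparisons plus the in-progress-GP short-circuit are elided:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node {
            pthread_mutex_t lock;
            unsigned long gp_seq_needed;
            struct node *parent;
    };

    /* Caller holds start->lock, matching rcu_start_this_gp()'s contract. */
    static bool request_gp(struct node *start, unsigned long gp_seq_req)
    {
            struct node *np;
            bool started = false;

            for (np = start; ; np = np->parent) {
                    if (np != start)
                            pthread_mutex_lock(&np->lock);
                    if (np->gp_seq_needed >= gp_seq_req)
                            goto unlock_out;        /* already requested: bail */
                    np->gp_seq_needed = gp_seq_req;
                    if (np != start && np->parent)
                            pthread_mutex_unlock(&np->lock);        /* funnel up */
                    if (!np->parent)
                            break;                  /* at the root */
            }
            started = true;                         /* root marked: start a GP */
    unlock_out:
            if (np != start)
                    pthread_mutex_unlock(&np->lock);
            return started;
    }

    int main(void)
    {
            struct node root = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
            struct node leaf = { PTHREAD_MUTEX_INITIALIZER, 0, &root };
            int first, second;

            pthread_mutex_lock(&leaf.lock);
            first = request_gp(&leaf, 4);           /* records and "starts" */
            pthread_mutex_unlock(&leaf.lock);

            pthread_mutex_lock(&leaf.lock);
            second = request_gp(&leaf, 4);          /* sees Prestarted, bails */
            pthread_mutex_unlock(&leaf.lock);

            return !(first && !second);
    }
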
 
@@ -1703,13 +1677,13 @@ unlock_out:
  */
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       unsigned long c = rnp->completed;
        bool needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-       need_future_gp_element(rnp, c) = false;
-       needmore = need_any_future_gp(rnp);
-       trace_rcu_this_gp(rnp, rdp, c,
+       needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+       if (!needmore)
+               rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+       trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
 }
@@ -1727,25 +1701,25 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
            !READ_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
-       swake_up(&rsp->gp_wq);
+       swake_up_one(&rsp->gp_wq);
 }
 
 /*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned.  Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure.  This function is idempotent, so it does
- * not hurt to call it repeatedly.  Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned.  Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure.  This function is idempotent, so it does not hurt
+ * to call it repeatedly.  Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp)
 {
-       unsigned long c;
+       unsigned long gp_seq_req;
        bool ret = false;
 
        raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
         * accelerating callback invocation to an earlier grace-period
         * number.
         */
-       c = rcu_cbs_completed(rsp, rnp);
-       if (rcu_segcblist_accelerate(&rdp->cblist, c))
-               ret = rcu_start_this_gp(rnp, rdp, c);
+       gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+       if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+               ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
        /* Trace depending on how much we were able to accelerate. */
        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
        else
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
        return ret;
 }
 
+/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+                                       struct rcu_node *rnp,
+                                       struct rcu_data *rdp)
+{
+       unsigned long c;
+       bool needwake;
+
+       lockdep_assert_irqs_disabled();
+       c = rcu_seq_snap(&rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               /* Old request still live, so mark recent callbacks. */
+               (void)rcu_segcblist_accelerate(&rdp->cblist, c);
+               return;
+       }
+       raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+       raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       if (needwake)
+               rcu_gp_kthread_wake(rsp);
+}
+
 /*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
  * sublist.  This function is idempotent, so it does not hurt to
  * invoke it repeatedly.  As long as it is not invoked -too- often...
  * Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                return false;
 
        /*
-        * Find all callbacks whose ->completed numbers indicate that they
+        * Find all callbacks whose ->gp_seq numbers indicate that they
         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
         */
-       rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+       rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
        /* Classify any remaining callbacks. */
        return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
        raw_lockdep_assert_held_rcu_node(rnp);
 
-       /* Handle the ends of any preceding grace periods first. */
-       if (rdp->completed == rnp->completed &&
-           !unlikely(READ_ONCE(rdp->gpwrap))) {
-
-               /* No grace period end, so just accelerate recent callbacks. */
-               ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+       if (rdp->gp_seq == rnp->gp_seq)
+               return false; /* Nothing to do. */
 
+       /* Handle the ends of any preceding grace periods first. */
+       if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
+               ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
        } else {
-
-               /* Advance callbacks. */
-               ret = rcu_advance_cbs(rsp, rnp, rdp);
-
-               /* Remember that we saw this grace-period completion. */
-               rdp->completed = rnp->completed;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+               ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
        }
 
-       if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+       /* Now handle the beginnings of any new-to-this-CPU grace periods. */
+       if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
-               rdp->gpnum = rnp->gpnum;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+               trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
                need_gp = !!(rnp->qsmask & rdp->grpmask);
                rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
-               WRITE_ONCE(rdp->gpwrap, false);
-               rcu_gpnum_ovf(rnp, rdp);
        }
+       rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
+       if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       WRITE_ONCE(rdp->gpwrap, false);
+       rcu_gpnum_ovf(rnp, rdp);
        return ret;
 }
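
__note_gp_changes() now keys everything off two predicates over gp_seq snapshots: has a full grace period ended since the snapshot, and has a new one started since. They can be illustrated with the same toy two-bit encoding used earlier; the kernel versions additionally use wrap-safe comparisons, omitted here:

    #include <assert.h>

    #define CTR_SHIFT       2
    #define STATE_MASK      ((1UL << CTR_SHIFT) - 1)

    /* Has a full grace period ended since the snapshot "old"? */
    static int completed_gp(unsigned long old, unsigned long new)
    {
            return old < (new & ~STATE_MASK);
    }

    /* Has a new grace period started since the snapshot "old"? */
    static int new_gp(unsigned long old, unsigned long new)
    {
            return ((old + STATE_MASK) & ~STATE_MASK) < new;
    }

    int main(void)
    {
            unsigned long snap = 1;         /* taken while GP #0 ran */

            assert(!completed_gp(snap, 1)); /* same GP still running */
            assert(completed_gp(snap, 4));  /* GP #0 has ended */
            assert(!new_gp(snap, 4));       /* ...but GP #1 not yet started */
            assert(new_gp(snap, 5));        /* GP #1 now in progress */
            return 0;
    }
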
 
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
        local_irq_save(flags);
        rnp = rdp->mynode;
-       if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
-            rdp->completed == READ_ONCE(rnp->completed) &&
+       if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
                local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 {
        if (delay > 0 &&
-           !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+           !(rcu_seq_ctr(rsp->gp_seq) %
+             (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
                schedule_timeout_uninterruptible(delay);
 }
 
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  */
 static bool rcu_gp_init(struct rcu_state *rsp)
 {
+       unsigned long flags;
        unsigned long oldmask;
+       unsigned long mask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
        /* Advance to a new grace period and initialize state. */
        record_gp_stall_check_time(rsp);
-       /* Record GP times before starting GP, hence smp_store_release(). */
-       smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
-       trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+       /* Record GP times before starting GP, hence rcu_seq_start(). */
+       rcu_seq_start(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * for subsequent online CPUs, and that quiescent-state forcing
         * will handle subsequent offline CPUs.
         */
+       rsp->gp_state = RCU_GP_ONOFF;
        rcu_for_each_leaf_node(rsp, rnp) {
-               rcu_gp_slow(rsp, gp_preinit_delay);
+               spin_lock(&rsp->ofl_lock);
                raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
                        raw_spin_unlock_irq_rcu_node(rnp);
+                       spin_unlock(&rsp->ofl_lock);
                        continue;
                }
 
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
                if (!oldmask != !rnp->qsmaskinit) {
-                       if (!oldmask) /* First online CPU for this rcu_node. */
-                               rcu_init_new_rnp(rnp);
-                       else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
-                               rnp->wait_blkd_tasks = true;
-                       else /* Last offline CPU and can propagate. */
+                       if (!oldmask) { /* First online CPU for rcu_node. */
+                               if (!rnp->wait_blkd_tasks) /* Ever offline? */
+                                       rcu_init_new_rnp(rnp);
+                       } else if (rcu_preempt_has_tasks(rnp)) {
+                               rnp->wait_blkd_tasks = true; /* blocked tasks */
+                       } else { /* Last offline CPU and can propagate. */
                                rcu_cleanup_dead_rnp(rnp);
+                       }
                }
 
                /*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * still offline, propagate up the rcu_node tree and
                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
                 * rcu_node structure's CPUs has since come back online,
-                * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
-                * checks for this, so just call it unconditionally).
+                * simply clear ->wait_blkd_tasks.
                 */
                if (rnp->wait_blkd_tasks &&
-                   (!rcu_preempt_has_tasks(rnp) ||
-                    rnp->qsmaskinit)) {
+                   (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
                        rnp->wait_blkd_tasks = false;
-                       rcu_cleanup_dead_rnp(rnp);
+                       if (!rnp->qsmaskinit)
+                               rcu_cleanup_dead_rnp(rnp);
                }
 
                raw_spin_unlock_irq_rcu_node(rnp);
+               spin_unlock(&rsp->ofl_lock);
        }
+       rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
         */
+       rsp->gp_state = RCU_GP_INIT;
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rdp = this_cpu_ptr(rsp->rda);
-               rcu_preempt_check_blocked_tasks(rnp);
+               rcu_preempt_check_blocked_tasks(rsp, rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               WRITE_ONCE(rnp->gpnum, rsp->gpnum);
-               if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-                       WRITE_ONCE(rnp->completed, rsp->completed);
+               WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
-               trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+               trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq_rcu_node(rnp);
+               /* Quiescent states for tasks on any now-offline CPUs. */
+               mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+               rnp->rcu_gp_init_mask = mask;
+               if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               else
+                       raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
@@ -2002,7 +2015,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
        unsigned long gp_duration;
        bool needgp = false;
+       unsigned long new_gp_seq;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
        struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
-        * Propagate new ->completed value to rcu_node structures so
-        * that other CPUs don't have to wait until the start of the next
-        * grace period to process their callbacks.  This also avoids
-        * some nasty RCU grace-period initialization races by forcing
-        * the end of the current grace period to be completely recorded in
-        * all of the rcu_node structures before the beginning of the next
-        * grace period is recorded in any of the rcu_node structures.
+        * Propagate new ->gp_seq value to rcu_node structures so that
+        * other CPUs don't have to wait until the start of the next grace
+        * period to process their callbacks.  This also avoids some nasty
+        * RCU grace-period initialization races by forcing the end of
+        * the current grace period to be completely recorded in all of
+        * the rcu_node structures before the beginning of the next grace
+        * period is recorded in any of the rcu_node structures.
         */
+       new_gp_seq = rsp->gp_seq;
+       rcu_seq_end(&new_gp_seq);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
-               WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+               if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+                       dump_blkd_tasks(rsp, rnp, 10);
                WARN_ON_ONCE(rnp->qsmask);
-               WRITE_ONCE(rnp->completed, rsp->gpnum);
+               WRITE_ONCE(rnp->gp_seq, new_gp_seq);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
-       raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+       raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
        /* Declare grace period done. */
-       WRITE_ONCE(rsp->completed, rsp->gpnum);
-       trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+       rcu_seq_end(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
        rsp->gp_state = RCU_GP_IDLE;
        /* Check for GP requests since above loop. */
        rdp = this_cpu_ptr(rsp->rda);
-       if (need_any_future_gp(rnp)) {
-               trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+       if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+               trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
                                  TPS("CleanupMore"));
                needgp = true;
        }
        /* Advance CBs to reduce false positives below. */
        if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
                WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+               rsp->gp_req_activity = jiffies;
+               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
                                       TPS("newreq"));
+       } else {
+               WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        }
-       WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        raw_spin_unlock_irq_rcu_node(rnp);
 }
 
@@ -2141,10 +2160,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
-                       swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+                       swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
                                                     RCU_GP_FLAG_INIT);
                        rsp->gp_state = RCU_GP_DONE_GPS;
                        /* Locking provides needed memory barrier. */
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwaitsig"));
                }
 
                /* Handle quiescent-state forcing. */
                first_gp_fqs = true;
                j = jiffies_till_first_fqs;
-               if (j > HZ) {
-                       j = HZ;
-                       jiffies_till_first_fqs = HZ;
-               }
                ret = 0;
                for (;;) {
                        if (!ret) {
@@ -2173,10 +2188,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                           jiffies + 3 * j);
                        }
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
-                       ret = swait_event_idle_timeout(rsp->gp_wq,
+                       ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
                                        rcu_gp_fqs_check_wake(rsp, &gf), j);
                        rsp->gp_state = RCU_GP_DOING_FQS;
                        /* Locking provides needed memory barriers. */
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsstart"));
                                rcu_gp_fqs(rsp, first_gp_fqs);
                                first_gp_fqs = false;
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsend"));
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
-                               if (j > HZ) {
-                                       j = HZ;
-                                       jiffies_till_next_fqs = HZ;
-                               } else if (j < 1) {
-                                       j = 1;
-                                       jiffies_till_next_fqs = 1;
-                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqswaitsig"));
                                ret = 1; /* Keep old FQS timing. */
                                j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * must be represented by the same rcu_node structure (which need not be a
  * leaf rcu_node structure, though it often will be).  The gps parameter
  * is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps.  That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
  * must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled.  This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
        /* Walk up the rcu_node hierarchy. */
        for (;;) {
-               if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+               if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
 
                        /*
                         * Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
                             rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
-               trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+               trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
                                                 mask, rnp->qsmask, rnp->level,
                                                 rnp->grplo, rnp->grphi,
                                                 !!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
+               rnp->completedqs = rnp->gp_seq;
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {
 
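The "&& mask" added above is what implements the documented special case: with a zero mask, the bit-already-cleared early exit can never fire, so the walk proceeds and propagates the quiescent state.  A standalone illustration of just that test (helper name hypothetical):

        #include <stdbool.h>
        #include <stdio.h>

        /* The early-exit bit test from rcu_report_qs_rnp(), in isolation. */
        static bool bit_already_cleared(unsigned long qsmask, unsigned long mask)
        {
                return !(qsmask & mask) && mask;
        }

        int main(void)
        {
                printf("%d\n", bit_already_cleared(0x4, 0x4)); /* 0: bit still set */
                printf("%d\n", bit_already_cleared(0x0, 0x4)); /* 1: already clear */
                printf("%d\n", bit_already_cleared(0x0, 0x0)); /* 0: check disabled */
                return 0;
        }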
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-                                     struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                         struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
        unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        struct rcu_node *rnp_p;
 
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
-           rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+       if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+           WARN_ON_ONCE(rsp != rcu_state_p) ||
+           WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+           rnp->qsmask != 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
+       rnp->completedqs = rnp->gp_seq;
        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
                return;
        }
 
-       /* Report up the rest of the hierarchy, tracking current ->gpnum. */
-       gps = rnp->gpnum;
+       /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
+       gps = rnp->gp_seq;
        mask = rnp->grpmask;
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
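The conversion above leans on WARN_ON_ONCE() evaluating to its condition, so a violated invariant both emits a one-time splat and still takes the defensive early return.  A hypothetical user-space model of the idiom (the kernel's real macro lives in include/asm-generic/bug.h):

        #include <stdio.h>

        #define WARN_ON_ONCE(cond) ({                                   \
                static int __warned;                                    \
                int __c = !!(cond);                                     \
                if (__c && !__warned) {                                 \
                        __warned = 1;                                   \
                        fprintf(stderr, "WARNING at %s:%d\n",           \
                                __FILE__, __LINE__);                    \
                }                                                       \
                __c;                                                    \
        })

        /* Usage mirrors the code above: warn once, then bail out safely
         * (invariant_broken() and more_work_pending() are placeholders). */
        if (WARN_ON_ONCE(invariant_broken()) || more_work_pending())
                return;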
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
        rnp = rdp->mynode;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
-           rnp->completed == rnp->gpnum || rdp->gpwrap) {
+       if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
+           rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
-       RCU_TRACE(unsigned long mask;)
+       RCU_TRACE(bool blkd;)
        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
        RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
 
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return;
 
-       RCU_TRACE(mask = rdp->grpmask;)
-       trace_rcu_grace_period(rsp->name,
-                              rnp->gpnum + 1 - !!(rnp->qsmask & mask),
-                              TPS("cpuofl"));
+       RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+       trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+                              blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
 }
 
 /*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * This function therefore goes up the tree of rcu_node structures,
  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
  * the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
  *
  * This function does check that the specified rcu_node structure has
  * all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        long mask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-           rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+           WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+           WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
                return;
        for (;;) {
                mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                        break;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
-               rnp->qsmask &= ~mask;
+               /* Between grace periods, so better already be zero! */
+               WARN_ON_ONCE(rnp->qsmask);
                if (rnp->qsmaskinit) {
                        raw_spin_unlock_rcu_node(rnp);
                        /* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
 
                rcu_sched_qs();
                rcu_bh_qs();
+               rcu_note_voluntary_context_switch(current);
 
        } else if (!in_softirq()) {
 
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
        rcu_preempt_check_callbacks();
        if (rcu_pending())
                invoke_rcu_core();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
+
        trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                                /* rcu_initiate_boost() releases rnp->lock */
                                continue;
                        }
-                       if (rnp->parent &&
-                           (rnp->parent->qsmask & rnp->grpmask)) {
-                               /*
-                                * Race between grace-period
-                                * initialization and task exiting RCU
-                                * read-side critical section: Report.
-                                */
-                               rcu_report_unblock_qs_rnp(rsp, rnp, flags);
-                               /* rcu_report_unblock_qs_rnp() rlses ->lock */
-                               continue;
-                       }
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       continue;
                }
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                        }
                }
                if (mask != 0) {
-                       /* Idle/offline CPUs, report (releases rnp->lock. */
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+                       /* Idle/offline CPUs, report (releases rnp->lock). */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2746,6 +2755,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
        rcu_gp_kthread_wake(rsp);
 }
 
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+                        struct rcu_data *rdp)
+{
+       const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
+       unsigned long flags;
+       unsigned long j;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+       static atomic_t warned = ATOMIC_INIT(0);
+
+       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+               return;
+       j = jiffies; /* Expensive access, and in common case don't get here. */
+       if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned))
+               return;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       /* Hold onto the leaf lock to make others see warned==1. */
+
+       if (rnp_root != rnp)
+               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, rsp->gp_req_activity + gpssdelay) ||
+           time_before(j, rsp->gp_activity + gpssdelay) ||
+           atomic_xchg(&warned, 1)) {
+               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
+                __func__, (long)READ_ONCE(rsp->gp_seq),
+                (long)READ_ONCE(rnp_root->gp_seq_needed),
+                j - rsp->gp_req_activity, j - rsp->gp_activity,
+                rsp->gp_flags, rsp->gp_state, rsp->name,
+                rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+       WARN_ON(1);
+       if (rnp_root != rnp)
+               raw_spin_unlock_rcu_node(rnp_root);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
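rcu_check_gp_start_stall() follows a check/lock/recheck ladder: a lockless fast path, a recheck under the leaf rcu_node lock, and a final recheck under the root lock, with atomic_xchg() electing exactly one warner.  A condensed sketch of the pattern (structure and helper names hypothetical):

        static atomic_t warned = ATOMIC_INIT(0);

        static void check_for_stall(struct state *sp)
        {
                if (all_is_well(sp) || atomic_read(&warned))
                        return;                 /* 1: lockless fast path. */
                spin_lock(&sp->leaf_lock);
                if (all_is_well(sp) || atomic_read(&warned)) {
                        spin_unlock(&sp->leaf_lock);
                        return;                 /* 2: recheck under leaf lock. */
                }
                spin_lock(&sp->root_lock);
                if (all_is_well(sp) || atomic_xchg(&warned, 1)) {
                        spin_unlock(&sp->root_lock);
                        spin_unlock(&sp->leaf_lock);
                        return;                 /* 3: recheck; elect one warner. */
                }
                WARN_ON(1);                     /* At most one task gets here. */
                spin_unlock(&sp->root_lock);
                spin_unlock(&sp->leaf_lock);
        }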
+
 /*
  * This does the RCU core processing work for the specified rcu_state
  * and rcu_data structures.  This may be called only from the CPU to
@@ -2755,9 +2823,8 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
        unsigned long flags;
-       bool needwake;
        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-       struct rcu_node *rnp;
+       struct rcu_node *rnp = rdp->mynode;
 
        WARN_ON_ONCE(!rdp->beenonline);
 
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (!rcu_gp_in_progress(rsp) &&
            rcu_segcblist_is_enabled(&rdp->cblist)) {
                local_irq_save(flags);
-               if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
-                       local_irq_restore(flags);
-               } else {
-                       rnp = rdp->mynode;
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
-               }
+               if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+                       rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+               local_irq_restore(flags);
        }
 
+       rcu_check_gp_start_stall(rsp, rnp, rdp);
+
        /* If there are callbacks ready, invoke them. */
        if (rcu_segcblist_ready_cbs(&rdp->cblist))
                invoke_rcu_callbacks(rsp, rdp);
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                            struct rcu_head *head, unsigned long flags)
 {
-       bool needwake;
-
        /*
         * If called from an extended quiescent state, invoke the RCU
         * core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                /* Start a new grace period if one not already started. */
                if (!rcu_gp_in_progress(rsp)) {
-                       struct rcu_node *rnp = rdp->mynode;
-
-                       raw_spin_lock_rcu_node(rnp);
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_rcu_node(rnp);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
+                       rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
                } else {
                        /* Give the grace period a kick. */
                        rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
  */
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
 {
        int ret;
 
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_rcu()
-        * and cond_synchronize_rcu().
-        */
-       return smp_load_acquire(&rcu_state_p->gpnum);
+       return rcu_seq_snap(&rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
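rcu_seq_snap() returns the ->gp_seq value that must be reached before a full grace period is known to have elapsed since the snapshot, and rcu_seq_done() tests for it.  A sketch modeled on kernel/rcu/rcu.h (smp_mb() and READ_ONCE() omitted here):

        /* If no GP is in progress (state bits zero), the next GP suffices;
         * if one is already running, the GP after it is required.  Adding
         * 2 * RCU_SEQ_STATE_MASK + 1 and clearing the state bits covers
         * both cases. */
        static inline unsigned long rcu_seq_snap(unsigned long *sp)
        {
                return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        }

        /* True once a full grace period has elapsed since snapshot s. */
        static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
        {
                return ULONG_CMP_GE(*sp, s);
        }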
 
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  */
 void cond_synchronize_rcu(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_state_p->completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
                synchronize_rcu();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
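Taken together, the pair now forms a polled grace-period API.  A hypothetical caller (do_other_work() and old_ptr are placeholders):

        unsigned long cookie = get_state_synchronize_rcu();

        do_other_work();        /* Anything outside an RCU read-side section. */

        /* Blocks in synchronize_rcu() only if a full grace period has not
         * already elapsed; otherwise it reduces to the smp_mb() above. */
        cond_synchronize_rcu(cookie);
        kfree(old_ptr);         /* All pre-existing readers have finished. */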
 
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_sched()
-        * and cond_synchronize_sched().
-        */
-       return smp_load_acquire(&rcu_sched_state.gpnum);
+       return rcu_seq_snap(&rcu_sched_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
 
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  */
 void cond_synchronize_sched(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_sched_state.completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
                synchronize_sched();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                return 1;
 
-       /* Has another RCU grace period completed?  */
-       if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
-               return 1;
-
-       /* Has a new RCU grace period started? */
-       if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+       /* Have RCU grace period completed or started?  */
+       if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
            unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
                return 1;
 
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 {
        long mask;
+       long oldmask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
+       WARN_ON_ONCE(rnp->wait_blkd_tasks);
        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+               oldmask = rnp->qsmaskinit;
                rnp->qsmaskinit |= mask;
                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+               if (oldmask)
+                       return;
        }
 }
 
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+       rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+       rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+       rdp->rcu_onl_gp_seq = rsp->gp_seq;
+       rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 /*
  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
- * offline event can be happening at a given time.  Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time.  Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
        rdp->beenonline = true;  /* We have now been online. */
-       rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
-       rdp->completed = rnp->completed;
+       rdp->gp_seq = rnp->gp_seq;
+       rdp->gp_seq_needed = rnp->gp_seq;
        rdp->cpu_no_qs.b.norm = true;
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        rdp->rcu_iw_pending = false;
-       rdp->rcu_iw_gpnum = rnp->gpnum - 1;
-       trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+       rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+       trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
                nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
                /* Allow lockless access for expedited grace periods. */
                smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+               rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+               rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+               if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+                       /* Report QS -after- changing ->qsmaskinitnext! */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               } else {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               }
        }
        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
  * bit masks.
  */
 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
+       spin_lock(&rsp->ofl_lock);
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+       rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+       rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+       if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+               /* Report quiescent state -before- changing ->qsmaskinitnext! */
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       }
        rnp->qsmaskinitnext &= ~mask;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       spin_unlock(&rsp->ofl_lock);
 }
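Per the new ->ofl_lock's description below ("Synchronize offline with GP pre-initialization"), the offline path now nests the leaf rcu_node lock inside rsp->ofl_lock, with grace-period pre-initialization expected to take the same outer lock.  The resulting ordering, condensed (note that rcu_report_qs_rnp() releases rnp->lock, hence the reacquisition above):

        spin_lock(&rsp->ofl_lock);      /* Exclude GP pre-initialization. */
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* ... record ->rcu_ofl_gp_seq/->rcu_ofl_gp_flags, report any
         *     quiescent state RCU is still waiting for, then clear the
         *     CPU's bit in ->qsmaskinitnext ... */
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        spin_unlock(&rsp->ofl_lock);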
 
 /*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
        struct task_struct *t;
 
        /* Force priority into range. */
-       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+           && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+               kthread_prio = 2;
+       else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
                kthread_prio = 1;
        else if (kthread_prio < 0)
                kthread_prio = 0;
        else if (kthread_prio > 99)
                kthread_prio = 99;
+
        if (kthread_prio != kthread_prio_in)
                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
                         kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
                                                   &rcu_fqs_class[i], fqs[i]);
-                       rnp->gpnum = rsp->gpnum;
-                       rnp->completed = rsp->completed;
+                       rnp->gp_seq = rsp->gp_seq;
+                       rnp->gp_seq_needed = rsp->gp_seq;
+                       rnp->completedqs = rsp->gp_seq;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
-       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+       pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*
index 78e051dffc5bf6aab3e56b8d5966996269eaf1a3..4e74df768c579d9fe356081b0f421bdbab731275 100644
@@ -81,18 +81,16 @@ struct rcu_node {
        raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
                                        /*  some rcu_state fields as well as */
                                        /*  following. */
-       unsigned long gpnum;    /* Current grace period for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
-       unsigned long completed; /* Last GP completed for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
+       unsigned long gp_seq;   /* Track rsp->rcu_gp_seq. */
+       unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+       unsigned long completedqs; /* All QSes done for this node. */
        unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                /*  order for current grace period to proceed.*/
                                /*  In leaf rcu_node, each bit corresponds to */
                                /*  an rcu_data structure, otherwise, each */
                                /*  bit corresponds to a child rcu_node */
                                /*  structure. */
+       unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
        unsigned long qsmaskinit;
                                /* Per-GP initial value for qsmask. */
                                /*  Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
        struct swait_queue_head nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-       u8 need_future_gp[4];   /* Counts of upcoming GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
        spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
        bool exp_need_flush;    /* Need to flush workitem? */
 } ____cacheline_internodealigned_in_smp;
 
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
-       (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
-       ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
-#define need_any_future_gp(rnp)                                                \
-({                                                                     \
-       int __i;                                                        \
-       bool __nonzero = false;                                         \
-                                                                       \
-       for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)   \
-               __nonzero = __nonzero ||                                \
-                           READ_ONCE((rnp)->need_future_gp[__i]);      \
-       __nonzero;                                                      \
-})
-
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
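The removed need_future_gp[] machinery is superseded by the ->gp_seq_needed fields: a future grace-period request is now just a rcu_seq_snap() value that is recorded and later compared.  A condensed, hypothetical sketch of the bookkeeping (locking and funnel walk omitted):

        /* Request: remember the furthest grace period needed here. */
        unsigned long c = rcu_seq_snap(&rsp->gp_seq);

        if (ULONG_CMP_LT(rnp->gp_seq_needed, c))
                rnp->gp_seq_needed = c;

        /* Satisfaction: the request completes once ->gp_seq catches up,
         * as tested by the CleanupMore check in rcu_gp_cleanup() above. */
        if (ULONG_CMP_GE(rnp->gp_seq, rnp->gp_seq_needed))
                return;         /* No further grace period needed. */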
@@ -206,16 +187,14 @@ union rcu_noqs {
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
        /* 1) quiescent-state and grace-period handling : */
-       unsigned long   completed;      /* Track rsp->completed gp number */
-                                       /*  in order to detect GP end. */
-       unsigned long   gpnum;          /* Highest gp number that this CPU */
-                                       /*  is aware of having started. */
+       unsigned long   gp_seq;         /* Track rsp->rcu_gp_seq counter. */
+       unsigned long   gp_seq_needed;  /* Track rsp->rcu_gp_seq_needed ctr. */
        unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
        union rcu_noqs  cpu_no_qs;      /* No QSes yet for this CPU. */
        bool            core_needs_qs;  /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
-       bool            gpwrap;         /* Possible gpnum/completed wrap. */
+       bool            gpwrap;         /* Possible ->gp_seq wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long   ticks_this_gp;  /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
 
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
-       unsigned long offline_fqs;      /* Kicked due to being offline. */
        unsigned long cond_resched_completed;
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
                                        /* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-       /* 7) RCU CPU stall data. */
+       /* 7) Diagnostic data, including RCU CPU stall warnings. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */
        /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
        struct irq_work rcu_iw;         /* Check for non-irq activity. */
        bool rcu_iw_pending;            /* Is ->rcu_iw pending? */
-       unsigned long rcu_iw_gpnum;     /* ->gpnum associated with ->rcu_iw. */
+       unsigned long rcu_iw_gp_seq;    /* ->gp_seq associated with ->rcu_iw. */
+       unsigned long rcu_ofl_gp_seq;   /* ->gp_seq at last offline. */
+       short rcu_ofl_gp_flags;         /* ->gp_flags at last offline. */
+       unsigned long rcu_onl_gp_seq;   /* ->gp_seq at last online. */
+       short rcu_onl_gp_flags;         /* ->gp_flags at last online. */
 
        int cpu;
        struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
 
        u8      boost ____cacheline_internodealigned_in_smp;
                                                /* Subject to priority boost. */
-       unsigned long gpnum;                    /* Current gp number. */
-       unsigned long completed;                /* # of last completed gp. */
+       unsigned long gp_seq;                   /* Grace-period sequence #. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
        struct swait_queue_head gp_wq;          /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
                                                /*  but in jiffies. */
        unsigned long gp_activity;              /* Time of last GP kthread */
                                                /*  activity in jiffies. */
+       unsigned long gp_req_activity;          /* Time of last GP request */
+                                               /*  in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
        const char *name;                       /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
+
+       spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+                                               /* Synchronize offline with */
+                                               /*  GP pre-initialization. */
 };
 
 /* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
 #define RCU_GP_IDLE     0      /* Initial state and no GP in progress. */
 #define RCU_GP_WAIT_GPS  1     /* Wait for grace-period start. */
 #define RCU_GP_DONE_GPS  2     /* Wait done for grace-period start. */
-#define RCU_GP_WAIT_FQS  3     /* Wait for force-quiescent-state time. */
-#define RCU_GP_DOING_FQS 4     /* Wait done for force-quiescent-state time. */
-#define RCU_GP_CLEANUP   5     /* Grace-period cleanup started. */
-#define RCU_GP_CLEANED   6     /* Grace-period cleanup complete. */
+#define RCU_GP_ONOFF     3     /* Grace-period initialization hotplug. */
+#define RCU_GP_INIT      4     /* Grace-period initialization. */
+#define RCU_GP_WAIT_FQS  5     /* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 6     /* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP   7     /* Grace-period cleanup started. */
+#define RCU_GP_CLEANED   8     /* Grace-period cleanup complete. */
 
 #ifndef RCU_TREE_NONCORE
 static const char * const gp_state_names[] = {
        "RCU_GP_IDLE",
        "RCU_GP_WAIT_GPS",
        "RCU_GP_DONE_GPS",
+       "RCU_GP_ONOFF",
+       "RCU_GP_INIT",
        "RCU_GP_WAIT_FQS",
        "RCU_GP_DOING_FQS",
        "RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+                                           struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+                           int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
index d40708e8c5d6e1f234ad508aa2cf60afa3e4e83c..0b2c2ad69629cdc5ee4fd74984d90179a9f7eafd 100644
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
-                               swake_up(&rsp->expedited_wq);
+                               swake_up_one(&rsp->expedited_wq);
                        }
                        break;
                }
@@ -472,6 +472,7 @@ retry_ipi:
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
 {
+       int cpu;
        struct rcu_node *rnp;
 
        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                rnp->rew.rew_func = func;
                rnp->rew.rew_rsp = rsp;
                if (!READ_ONCE(rcu_par_gp_wq) ||
-                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-                       /* No workqueues yet. */
+                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+                   rcu_is_last_leaf_node(rsp, rnp)) {
+                       /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-               queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_disable();
+               cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+               /* If all offline, queue the work on an unbound CPU. */
+               if (unlikely(cpu > rnp->grphi))
+                       cpu = WORK_CPU_UNBOUND;
+               queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_enable();
                rnp->exp_need_flush = true;
        }
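cpumask_next(n, mask) returns the first set CPU strictly greater than n, so starting from grplo - 1 above finds the first online CPU within the node's [grplo, grphi] range; overshooting grphi means every CPU in the node is offline.  In isolation (the surrounding preempt_disable() presumably keeps the sampled online mask stable while queueing):

        cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
        if (cpu > rnp->grphi)                   /* No online CPU in this node. */
                cpu = WORK_CPU_UNBOUND;         /* Any CPU may run the work. */
        queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);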
 
@@ -518,7 +526,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
        jiffies_start = jiffies;
 
        for (;;) {
-               ret = swait_event_timeout(
+               ret = swait_event_timeout_exclusive(
                                rsp->expedited_wq,
                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                jiffies_stall);
index 7fd12039e512664626590e0a188260403d4578cc..a97c20ea9bce65c7a6126effc271ce979f6e9f33 100644
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tRCU event tracing is enabled.\n");
        if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
            (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
-               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-                      RCU_FANOUT);
+               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
+                       RCU_FANOUT);
        if (rcu_fanout_exact)
                pr_info("\tHierarchical RCU autobalancing is disabled.\n");
        if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
                        RCU_FANOUT_LEAF);
        if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
-               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
+                       rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_BOOST
-       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
+               kthread_prio, CONFIG_RCU_BOOST_DELAY);
 #endif
        if (blimit != DEFAULT_RCU_BLIMIT)
                pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
+static void rcu_read_unlock_special(struct task_struct *t);
 
 /*
  * Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
        raw_lockdep_assert_held_rcu_node(rnp);
        WARN_ON_ONCE(rdp->mynode != rnp);
        WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
+       /* RCU better not be waiting on newly onlined CPUs! */
+       WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
+                    rdp->grpmask);
 
        /*
         * Decide where to queue the newly blocked task.  In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * ->exp_tasks pointers, respectively, to reference the newly
         * blocked tasks.
         */
-       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
                rnp->gp_tasks = &t->rcu_node_entry;
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
+       }
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
        WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 }
 
 /*
- * Record a preemptible-RCU quiescent state for the specified CPU.  Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state.  There might be any number of tasks blocked
- * while in an RCU read-side critical section.
+ * Record a preemptible-RCU quiescent state for the specified CPU.
+ * Note that this does not necessarily mean that the task currently running
+ * on the CPU is in a quiescent state:  Instead, it means that the current
+ * grace period need not wait on any RCU read-side critical section that
+ * starts later on this CPU.  It also means that if the current task is
+ * in an RCU read-side critical section, it has already added itself to
+ * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
+ * current task, there might be any number of other tasks blocked while
+ * in an RCU read-side critical section.
  *
- * As with the other rcu_*_qs() functions, callers to this function
- * must disable preemption.
+ * Callers to this function must disable preemption.
  */
 static void rcu_preempt_qs(void)
 {
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
        if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_preempt"),
-                                      __this_cpu_read(rcu_data_p->gpnum),
+                                      __this_cpu_read(rcu_data_p->gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
-                                      ? rnp->gpnum
-                                      : rnp->gpnum + 1);
+                                      ? rnp->gp_seq
+                                      : rcu_seq_snap(&rnp->gp_seq));
                rcu_preempt_ctxt_queue(rnp, rdp);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
 {
        bool empty_exp;
        bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
                WARN_ON_ONCE(rnp != t->rcu_blocked_node);
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
+                            (!empty_norm || rnp->qsmask));
                empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                empty_exp_now = sync_rcu_preempt_exp_done(rnp);
                if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
-                                                        rnp->gpnum,
+                                                        rnp->gp_seq,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
  * must be held by the caller.
  *
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        struct task_struct *t;
 
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
-       WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-       if (rcu_preempt_has_tasks(rnp)) {
+       if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+               dump_blkd_tasks(rsp, rnp, 10);
+       if (rcu_preempt_has_tasks(rnp) &&
+           (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
                rnp->gp_tasks = rnp->blkd_tasks.next;
                t = container_of(rnp->gp_tasks, struct task_struct,
                                 rcu_node_entry);
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
        }
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_callbacks(void)
 {
+       struct rcu_state *rsp = &rcu_preempt_state;
        struct task_struct *t = current;
 
        if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
        }
        if (t->rcu_read_lock_nesting > 0 &&
            __this_cpu_read(rcu_data_p->core_needs_qs) &&
-           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+           !t->rcu_read_unlock_special.b.need_qs &&
+           time_after(jiffies, rsp->gp_start + HZ))
                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -841,6 +861,47 @@ void exit_rcu(void)
        __rcu_read_unlock();
 }
 
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       int cpu;
+       int i;
+       struct list_head *lhp;
+       bool onl;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp1;
+
+       raw_lockdep_assert_held_rcu_node(rnp);
+       pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+               __func__, rnp->grplo, rnp->grphi, rnp->level,
+               (long)rnp->gp_seq, (long)rnp->completedqs);
+       for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+               pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
+                       __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
+       pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
+               __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+       pr_info("%s: ->blkd_tasks", __func__);
+       i = 0;
+       list_for_each(lhp, &rnp->blkd_tasks) {
+               pr_cont(" %p", lhp);
+               if (++i >= ncheck)
+                       break;
+       }
+       pr_cont("\n");
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+       }
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -949,6 +1011,15 @@ void exit_rcu(void)
 {
 }
 
+/*
+ * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * completed since we last checked and there are
                 * callbacks not yet ready to invoke.
                 */
-               if ((rdp->completed != rnp->completed ||
+               if ((rcu_seq_completed_gp(rdp->gp_seq,
+                                         rcu_seq_current(&rnp->gp_seq)) ||
                     unlikely(READ_ONCE(rdp->gpwrap))) &&
                    rcu_segcblist_pend_cbs(&rdp->cblist))
                        note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
         */
        touch_nmi_watchdog();
 
-       if (rsp->gpnum == rdp->gpnum) {
+       ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+       if (ticks_value) {
+               ticks_title = "GPs behind";
+       } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
-       } else {
-               ticks_title = "GPs behind";
-               ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
-       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
+       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
-       return &rnp->nocb_gp_wq[rnp->completed & 0x1];
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
 }
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
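rcu_seq_ctr() here assumes the ->gp_seq encoding from kernel/rcu/rcu.h: the low RCU_SEQ_CTR_SHIFT bits hold the grace-period phase, and the bits above count grace periods, so the low bit of the count ping-pongs between the two nocb wait queues. A stand-alone model of that packing:

#include <stdio.h>

/* Model of the assumed ->gp_seq layout (see kernel/rcu/rcu.h). */
#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;  /* grace-period count */
}

int main(void)
{
        unsigned long gp_seq = (5UL << RCU_SEQ_CTR_SHIFT) | 1;

        /* Successive grace periods alternate between the two queues. */
        printf("ctr=%lu wq index=%lu\n",
               rcu_seq_ctr(gp_seq), rcu_seq_ctr(gp_seq) & 0x1);
        return 0;
}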
@@ -1854,8 +1926,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
                WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
                del_timer(&rdp->nocb_timer);
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-               smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-               swake_up(&rdp_leader->nocb_wq);
+               smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+               swake_up_one(&rdp_leader->nocb_wq);
        } else {
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        }
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        bool needwake;
        struct rcu_node *rnp = rdp->mynode;
 
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       c = rcu_cbs_completed(rdp->rsp, rnp);
-       needwake = rcu_start_this_gp(rnp, rdp, c);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       if (needwake)
-               rcu_gp_kthread_wake(rdp->rsp);
+       local_irq_save(flags);
+       c = rcu_seq_snap(&rdp->rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               local_irq_restore(flags);
+       } else {
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+               needwake = rcu_start_this_gp(rnp, rdp, c);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               if (needwake)
+                       rcu_gp_kthread_wake(rdp->rsp);
+       }
 
        /*
         * Wait for the grace period.  Do so interruptibly to avoid messing
@@ -2082,9 +2159,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
         */
        trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
        for (;;) {
-               swait_event_interruptible(
-                       rnp->nocb_gp_wq[c & 0x1],
-                       (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
+               swait_event_interruptible_exclusive(
+                       rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
+                       (d = rcu_seq_done(&rnp->gp_seq, c)));
                if (likely(d))
                        break;
                WARN_ON(signal_pending(current));
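The snapshot/wait pairing above follows the helpers in kernel/rcu/rcu.h: rcu_seq_snap() returns the ->gp_seq value at which a full grace period beyond the current moment has elapsed, and rcu_seq_done() compares against it with wrap-safe arithmetic. A user-space model under those assumptions:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Round up to the sequence value at which a full grace period beyond
 * the current moment has ended (state bits cleared). */
static unsigned long seq_snap(const unsigned long *sp)
{
        return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

/* Wrap-safe "has *sp reached s?"; stands in for ULONG_CMP_GE(). */
static int seq_done(const unsigned long *sp, unsigned long s)
{
        return (long)(*sp - s) >= 0;
}

int main(void)
{
        unsigned long gp_seq = (7UL << RCU_SEQ_CTR_SHIFT) | 1; /* GP 7 running */
        unsigned long c = seq_snap(&gp_seq);

        printf("snap=%lu done=%d\n", c, seq_done(&gp_seq, c)); /* done=0 */
        gp_seq = c;                                            /* GPs elapse */
        printf("done=%d\n", seq_done(&gp_seq, c));             /* done=1 */
        return 0;
}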
@@ -2111,7 +2188,7 @@ wait_again:
        /* Wait for callbacks to appear. */
        if (!rcu_nocb_poll) {
                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-               swait_event_interruptible(my_rdp->nocb_wq,
+               swait_event_interruptible_exclusive(my_rdp->nocb_wq,
                                !READ_ONCE(my_rdp->nocb_leader_sleep));
                raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
                my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2253,7 @@ wait_again:
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                        /* List was empty, so wake up the follower.  */
-                       swake_up(&rdp->nocb_wq);
+                       swake_up_one(&rdp->nocb_wq);
                }
        }
 
@@ -2193,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 {
        for (;;) {
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-               swait_event_interruptible(rdp->nocb_wq,
+               swait_event_interruptible_exclusive(rdp->nocb_wq,
                                         READ_ONCE(rdp->nocb_follower_head));
                if (smp_load_acquire(&rdp->nocb_follower_head)) {
                        /* ^^^ Ensure CB invocation follows _head test. */
@@ -2568,23 +2645,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
-/*
- * An adaptive-ticks CPU can potentially execute in kernel mode for an
- * arbitrarily long period of time with the scheduling-clock tick turned
- * off.  RCU will be paying attention to this CPU because it is in the
- * kernel, but the CPU cannot be guaranteed to be executing the RCU state
- * machine because the scheduling-clock tick has been disabled.  Therefore,
- * if an adaptive-ticks CPU is failing to respond to the current grace
- * period and has not be idle from an RCU perspective, kick it.
- */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
-       if (tick_nohz_full_cpu(cpu))
-               smp_send_reschedule(cpu);
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
-}
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
  */
 static void rcu_bind_gp_kthread(void)
 {
-       int __maybe_unused cpu;
-
        if (!tick_nohz_full_enabled())
                return;
        housekeeping_affine(current, HK_FLAG_RCU);
index 4c230a60ece44e1cf2fd65b954898fa3c6f52454..39cb23d22109386562683c57931df279257a9e85 100644 (file)
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);
+       int fract;
 
        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
-               while (!list_empty(&rcu_tasks_holdouts)) {
+
+               /* Start off with HZ/10 wait and slowly back off to 1 HZ wait */
+               fract = 10;
+
+               for (;;) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;
 
-                       schedule_timeout_interruptible(HZ);
+                       if (list_empty(&rcu_tasks_holdouts))
+                               break;
+
+                       /* Slowly back off waiting for holdouts */
+                       schedule_timeout_interruptible(HZ/fract);
+
+                       if (fract > 1)
+                               fract--;
+
                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        list = next;
                        cond_resched();
                }
+               /* Paranoid sleep to keep this from entering a tight loop */
                schedule_timeout_uninterruptible(HZ/10);
        }
 }
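The backoff added above starts at HZ/10 and decrements fract once per pass until the wait reaches a full HZ. A small sketch of the resulting schedule, assuming HZ is 1000 for illustration:

#include <stdio.h>

#define HZ 1000 /* assumed tick rate, for illustration only */

int main(void)
{
        int fract = 10;
        int pass;

        /* First wait is HZ/10 jiffies; each pass backs off toward HZ. */
        for (pass = 1; pass <= 12; pass++) {
                printf("pass %2d: sleep %4d jiffies\n", pass, HZ / fract);
                if (fract > 1)
                        fract--;
        }
        return 0;
}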
index ae306f90c51484fae6bb583733ca5e8f8b3e76be..c6242d8594dc7c0fab52de9df7f9cf01e49e5d0f 100644 (file)
@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
 {
        u32 cpu_id = raw_smp_processor_id();
 
-       if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id, &t->rseq->cpu_id_start))
                return -EFAULT;
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        trace_rseq_update(t);
        return 0;
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
        /*
         * Reset cpu_id_start to its initial state (0).
         */
-       if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
                return -EFAULT;
        /*
         * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
         * in after unregistration can figure out that rseq needs to be
         * registered again.
         */
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        return 0;
 }
@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
 static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
 {
        struct rseq_cs __user *urseq_cs;
-       unsigned long ptr;
+       u64 ptr;
        u32 __user *usig;
        u32 sig;
        int ret;
 
-       ret = __get_user(ptr, &t->rseq->rseq_cs);
-       if (ret)
-               return ret;
+       if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
+               return -EFAULT;
        if (!ptr) {
                memset(rseq_cs, 0, sizeof(*rseq_cs));
                return 0;
        }
-       urseq_cs = (struct rseq_cs __user *)ptr;
+       if (ptr >= TASK_SIZE)
+               return -EINVAL;
+       urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
        if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
                return -EFAULT;
-       if (rseq_cs->version > 0)
-               return -EINVAL;
 
+       if (rseq_cs->start_ip >= TASK_SIZE ||
+           rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
+           rseq_cs->abort_ip >= TASK_SIZE ||
+           rseq_cs->version > 0)
+               return -EINVAL;
+       /* Check for overflow. */
+       if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
+               return -EINVAL;
        /* Ensure that abort_ip is not in the critical section. */
        if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
                return -EINVAL;
 
-       usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32));
+       usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
        ret = get_user(sig, usig);
        if (ret)
                return ret;
@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
                printk_ratelimited(KERN_WARNING
                        "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
                        sig, current->rseq_sig, current->pid, usig);
-               return -EPERM;
+               return -EINVAL;
        }
        return 0;
 }
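The abort_ip check above relies on unsigned wraparound: a single compare rejects any abort handler inside [start_ip, start_ip + post_commit_offset), including addresses below start_ip, because the subtraction then wraps to a huge value. A user-space sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when abort_ip lies inside [start, start + len), using the
 * same single unsigned compare as the kernel check: if abort_ip is
 * below start, the subtraction wraps and the compare fails, so only
 * genuinely interior addresses match. */
static int in_cs(uint64_t start, uint64_t len, uint64_t abort_ip)
{
        return abort_ip - start < len;
}

int main(void)
{
        printf("%d %d %d\n",
               in_cs(0x1000, 0x40, 0x1010),    /* 1: inside */
               in_cs(0x1000, 0x40, 0x0ff0),    /* 0: below, wraps */
               in_cs(0x1000, 0x40, 0x1040));   /* 0: one past the end */
        return 0;
}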
@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
        int ret;
 
        /* Get thread flags. */
-       ret = __get_user(flags, &t->rseq->flags);
+       ret = get_user(flags, &t->rseq->flags);
        if (ret)
                return ret;
 
@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t)
         * of code outside of the rseq assembly block. This performs
         * a lazy clear of the rseq_cs field.
         *
-        * Set rseq_cs to NULL with single-copy atomicity.
+        * Set rseq_cs to NULL.
         */
-       return __put_user(0UL, &t->rseq->rseq_cs);
+       if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
+               return -EFAULT;
+       return 0;
 }
 
 /*
@@ -251,10 +260,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
        struct task_struct *t = current;
-       int ret;
+       int ret, sig;
 
        if (unlikely(t->flags & PF_EXITING))
                return;
@@ -268,7 +277,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
        return;
 
 error:
-       force_sig(SIGSEGV, t);
+       sig = ksig ? ksig->sig : 0;
+       force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
index d9a02b31810896b3201ba5bcee426ce2e1ac34a1..7fe183404c383f8d4611d47eb9d4299994f8b7ee 100644 (file)
@@ -20,7 +20,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle.o fair.o rt.o deadline.o
 obj-y += wait.o wait_bit.o swait.o completion.o
 
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
index 10c83e73837a14bb63629a3278d2967f01a134c4..e3e3b979f9bd2645d6cc7416e7c6c25cb7d20b2e 100644 (file)
@@ -53,6 +53,7 @@
  *
  */
 #include "sched.h"
+#include <linux/sched_clock.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -66,12 +67,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);
 
-__read_mostly int sched_clock_running;
-
-void sched_clock_init(void)
-{
-       sched_clock_running = 1;
-}
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
@@ -195,17 +191,40 @@ void clear_sched_clock_stable(void)
 
        smp_mb(); /* matches sched_clock_init_late() */
 
-       if (sched_clock_running == 2)
+       if (static_key_count(&sched_clock_running.key) == 2)
                __clear_sched_clock_stable();
 }
 
+static void __sched_clock_gtod_offset(void)
+{
+       struct sched_clock_data *scd = this_scd();
+
+       __scd_stamp(scd);
+       __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
+}
+
+void __init sched_clock_init(void)
+{
+       /*
+        * Set __gtod_offset such that once we mark sched_clock_running,
+        * sched_clock_tick() continues where sched_clock() left off.
+        *
+        * Even if TSC is buggered, we're still UP at this point so it
+        * can't really be out of sync.
+        */
+       local_irq_disable();
+       __sched_clock_gtod_offset();
+       local_irq_enable();
+
+       static_branch_inc(&sched_clock_running);
+}
 /*
  * We run this as late_initcall() such that it runs after all built-in drivers,
  * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
  */
 static int __init sched_clock_init_late(void)
 {
-       sched_clock_running = 2;
+       static_branch_inc(&sched_clock_running);
        /*
         * Ensure that it is impossible to not do a static_key update.
         *
@@ -350,8 +369,8 @@ u64 sched_clock_cpu(int cpu)
        if (sched_clock_stable())
                return sched_clock() + __sched_clock_offset;
 
-       if (unlikely(!sched_clock_running))
-               return 0ull;
+       if (!static_branch_unlikely(&sched_clock_running))
+               return sched_clock();
 
        preempt_disable_notrace();
        scd = cpu_sdc(cpu);
@@ -373,7 +392,7 @@ void sched_clock_tick(void)
        if (sched_clock_stable())
                return;
 
-       if (unlikely(!sched_clock_running))
+       if (!static_branch_unlikely(&sched_clock_running))
                return;
 
        lockdep_assert_irqs_disabled();
@@ -385,8 +404,6 @@ void sched_clock_tick(void)
 
 void sched_clock_tick_stable(void)
 {
-       u64 gtod, clock;
-
        if (!sched_clock_stable())
                return;
 
@@ -398,9 +415,7 @@ void sched_clock_tick_stable(void)
         * TSC to be unstable, any computation will be computing crap.
         */
        local_irq_disable();
-       gtod = ktime_get_ns();
-       clock = sched_clock();
-       __gtod_offset = (clock + __sched_clock_offset) - gtod;
+       __sched_clock_gtod_offset();
        local_irq_enable();
 }
 
@@ -434,9 +449,17 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
+void __init sched_clock_init(void)
+{
+       static_branch_inc(&sched_clock_running);
+       local_irq_disable();
+       generic_sched_clock_init();
+       local_irq_enable();
+}
+
 u64 sched_clock_cpu(int cpu)
 {
-       if (unlikely(!sched_clock_running))
+       if (!static_branch_unlikely(&sched_clock_running))
                return 0;
 
        return sched_clock();
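static_branch_inc() on a DEFINE_STATIC_KEY_FALSE key enables the branch and bumps a reference count, which is why the code above can distinguish "early init done" (count 1) from "late init done" (count 2) via static_key_count(). A rough user-space model of just the counting semantics; the real jump-label implementation patches the branch sites at runtime:

#include <stdio.h>

/* Model only: each inc bumps a counter and the branch reads as
 * enabled while the count is nonzero. */
static int sched_clock_running_count;

static void model_static_branch_inc(void)
{
        sched_clock_running_count++;
}

static int model_static_branch_unlikely(void)
{
        return sched_clock_running_count > 0;
}

int main(void)
{
        model_static_branch_inc();      /* sched_clock_init() */
        printf("running=%d count=%d\n",
               model_static_branch_unlikely(), sched_clock_running_count);
        model_static_branch_inc();      /* sched_clock_init_late() */
        printf("running=%d count=%d\n",
               model_static_branch_unlikely(), sched_clock_running_count);
        return 0;
}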
index e426b0cb9ac6314b8e19ada428e81fe1eabea7fb..a1ad5b7d5521be6340bf221c1a1c5983507a1754 100644 (file)
@@ -22,8 +22,8 @@
  *
  * See also complete_all(), wait_for_completion() and related routines.
  *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
  */
 void complete(struct completion *x)
 {
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(complete);
  *
  * This will wake up all threads waiting on this particular completion event.
  *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
  *
  * Since complete_all() sets the completion of @x permanently to done
  * to allow multiple waiters to finish, a call to reinit_completion()
index 78d8facba456c2fc44c7024ae1a1c8d9db6f0692..c45de46fdf10d7213cf37bed301e660a18c83135 100644 (file)
@@ -7,7 +7,6 @@
  */
 #include "sched.h"
 
-#include <linux/kthread.h>
 #include <linux/nospec.h>
 
 #include <linux/kcov.h>
@@ -18,6 +17,8 @@
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
 
+#include "pelt.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -45,14 +46,6 @@ const_debug unsigned int sysctl_sched_features =
  */
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
-/*
- * period over which we average the RT time consumption, measured
- * in ms.
- *
- * default: 1s
- */
-const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
-
 /*
  * period over which we measure -rt task CPU usage in us.
  * default: 1s
@@ -184,9 +177,9 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
        rq->clock_task += delta;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
-               sched_rt_avg_update(rq, irq_delta + steal);
+               update_irq_load_avg(rq, irq_delta + steal);
 #endif
 }
 
@@ -413,8 +406,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
         * its already queued (either by us or someone else) and will get the
         * wakeup due to that.
         *
-        * This cmpxchg() implies a full barrier, which pairs with the write
-        * barrier implied by the wakeup in wake_up_q().
+        * This cmpxchg() executes a full barrier, which pairs with the full
+        * barrier executed by the wakeup in wake_up_q().
         */
        if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
                return;
@@ -442,8 +435,8 @@ void wake_up_q(struct wake_q_head *head)
                task->wake_q.next = NULL;
 
                /*
-                * wake_up_process() implies a wmb() to pair with the queueing
-                * in wake_q_add() so as not to miss wakeups.
+                * wake_up_process() executes a full barrier, which pairs with
+                * the queueing in wake_q_add() so as not to miss wakeups.
                 */
                wake_up_process(task);
                put_task_struct(task);
@@ -650,23 +643,6 @@ bool sched_can_stop_tick(struct rq *rq)
        return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
-
-void sched_avg_update(struct rq *rq)
-{
-       s64 period = sched_avg_period();
-
-       while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
-               /*
-                * Inline assembly required to prevent the compiler
-                * optimising this loop into a divmod call.
-                * See __iter_div_u64_rem() for another example of this.
-                */
-               asm("" : "+rm" (rq->age_stamp));
-               rq->age_stamp += period;
-               rq->rt_avg /= 2;
-       }
-}
-
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -1200,6 +1176,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        __set_task_cpu(p, new_cpu);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
        if (task_on_rq_queued(p)) {
@@ -1281,16 +1258,17 @@ unlock:
 /*
  * Cross migrate two tasks
  */
-int migrate_swap(struct task_struct *cur, struct task_struct *p)
+int migrate_swap(struct task_struct *cur, struct task_struct *p,
+               int target_cpu, int curr_cpu)
 {
        struct migration_swap_arg arg;
        int ret = -EINVAL;
 
        arg = (struct migration_swap_arg){
                .src_task = cur,
-               .src_cpu = task_cpu(cur),
+               .src_cpu = curr_cpu,
                .dst_task = p,
-               .dst_cpu = task_cpu(p),
+               .dst_cpu = target_cpu,
        };
 
        if (arg.src_cpu == arg.dst_cpu)
@@ -1315,6 +1293,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
 out:
        return ret;
 }
+#endif /* CONFIG_NUMA_BALANCING */
 
 /*
  * wait_task_inactive - wait for a thread to unschedule.
@@ -1880,8 +1859,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *     rq(c1)->lock (if not at the same time, then in that order).
  *  C) LOCK of the rq(c1)->lock scheduling in task
  *
- * Transitivity guarantees that B happens after A and C after B.
- * Note: we only require RCpc transitivity.
+ * Release/acquire chaining guarantees that B happens after A and C after B.
  * Note: the CPU doing B need not be c0 or c1
  *
  * Example:
@@ -1943,16 +1921,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *   UNLOCK rq(0)->lock
  *
  *
- * However; for wakeups there is a second guarantee we must provide, namely we
- * must observe the state that lead to our wakeup. That is, not only must our
- * task observe its own prior state, it must also observe the stores prior to
- * its wakeup.
- *
- * This means that any means of doing remote wakeups must order the CPU doing
- * the wakeup against the CPU the task is going to end up running on. This,
- * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
- *
+ * However, for wakeups there is a second guarantee we must provide, namely we
+ * must ensure that CONDITION=1 done by the caller can not be reordered with
+ * accesses to the task state; see try_to_wake_up() and set_current_state().
  */
 
 /**
@@ -1968,6 +1939,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * Atomic against schedule() which would dequeue a task, also see
  * set_current_state().
  *
+ * This function executes a full memory barrier before accessing the task
+ * state; see set_current_state().
+ *
  * Return: %true if @p->state changes (an actual wakeup was done),
  *        %false otherwise.
  */
@@ -1999,21 +1973,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * be possible to, falsely, observe p->on_rq == 0 and get stuck
         * in smp_cond_load_acquire() below.
         *
-        * sched_ttwu_pending()                 try_to_wake_up()
-        *   [S] p->on_rq = 1;                  [L] P->state
-        *       UNLOCK rq->lock  -----.
-        *                              \
-        *                               +---   RMB
-        * schedule()                   /
-        *       LOCK rq->lock    -----'
-        *       UNLOCK rq->lock
+        * sched_ttwu_pending()                 try_to_wake_up()
+        *   STORE p->on_rq = 1                   LOAD p->state
+        *   UNLOCK rq->lock
+        *
+        * __schedule() (switch to task 'p')
+        *   LOCK rq->lock                        smp_rmb();
+        *   smp_mb__after_spinlock();
+        *   UNLOCK rq->lock
         *
         * [task p]
-        *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
+        *   STORE p->state = UNINTERRUPTIBLE     LOAD p->on_rq
         *
-        * Pairs with the UNLOCK+LOCK on rq->lock from the
-        * last wakeup of our task and the schedule that got our task
-        * current.
+        * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+        * __schedule().  See the comment for smp_mb__after_spinlock().
         */
        smp_rmb();
        if (p->on_rq && ttwu_remote(p, wake_flags))
@@ -2027,15 +2000,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * One must be running (->on_cpu == 1) in order to remove oneself
         * from the runqueue.
         *
-        *  [S] ->on_cpu = 1;   [L] ->on_rq
-        *      UNLOCK rq->lock
-        *                      RMB
-        *      LOCK   rq->lock
-        *  [S] ->on_rq = 0;    [L] ->on_cpu
+        * __schedule() (switch to task 'p')    try_to_wake_up()
+        *   STORE p->on_cpu = 1                  LOAD p->on_rq
+        *   UNLOCK rq->lock
         *
-        * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
-        * from the consecutive calls to schedule(); the first switching to our
-        * task, the second putting it to sleep.
+        * __schedule() (put 'p' to sleep)
+        *   LOCK rq->lock                        smp_rmb();
+        *   smp_mb__after_spinlock();
+        *   STORE p->on_rq = 0                   LOAD p->on_cpu
+        *
+        * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+        * __schedule().  See the comment for smp_mb__after_spinlock().
         */
        smp_rmb();
 
@@ -2141,8 +2116,7 @@ out:
  *
  * Return: 1 if the process was woken up, 0 if it was already running.
  *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * This function executes a full memory barrier before accessing the task state.
  */
 int wake_up_process(struct task_struct *p)
 {
@@ -2318,7 +2292,6 @@ static inline void init_schedstats(void) {}
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long flags;
-       int cpu = get_cpu();
 
        __sched_fork(clone_flags, p);
        /*
@@ -2354,14 +2327,12 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                p->sched_reset_on_fork = 0;
        }
 
-       if (dl_prio(p->prio)) {
-               put_cpu();
+       if (dl_prio(p->prio))
                return -EAGAIN;
-       } else if (rt_prio(p->prio)) {
+       else if (rt_prio(p->prio))
                p->sched_class = &rt_sched_class;
-       } else {
+       else
                p->sched_class = &fair_sched_class;
-       }
 
        init_entity_runnable_average(&p->se);
 
@@ -2377,7 +2348,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
         * We're setting the CPU for the first time, we don't migrate,
         * so use __set_task_cpu().
         */
-       __set_task_cpu(p, cpu);
+       __set_task_cpu(p, smp_processor_id());
        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -2394,8 +2365,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        plist_node_init(&p->pushable_tasks, MAX_PRIO);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
 #endif
-
-       put_cpu();
        return 0;
 }
 
@@ -2724,28 +2693,20 @@ static struct rq *finish_task_switch(struct task_struct *prev)
                membarrier_mm_sync_core_before_usermode(mm);
                mmdrop(mm);
        }
-       if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
-               switch (prev_state) {
-               case TASK_DEAD:
-                       if (prev->sched_class->task_dead)
-                               prev->sched_class->task_dead(prev);
+       if (unlikely(prev_state == TASK_DEAD)) {
+               if (prev->sched_class->task_dead)
+                       prev->sched_class->task_dead(prev);
 
-                       /*
-                        * Remove function-return probe instances associated with this
-                        * task and put them back on the free list.
-                        */
-                       kprobe_flush_task(prev);
-
-                       /* Task is done with its stack. */
-                       put_task_stack(prev);
+               /*
+                * Remove function-return probe instances associated with this
+                * task and put them back on the free list.
+                */
+               kprobe_flush_task(prev);
 
-                       put_task_struct(prev);
-                       break;
+               /* Task is done with its stack. */
+               put_task_stack(prev);
 
-               case TASK_PARKED:
-                       kthread_park_complete(prev);
-                       break;
-               }
+               put_task_struct(prev);
        }
 
        tick_nohz_task_switch();
@@ -3113,7 +3074,9 @@ static void sched_tick_remote(struct work_struct *work)
        struct tick_work *twork = container_of(dwork, struct tick_work, work);
        int cpu = twork->cpu;
        struct rq *rq = cpu_rq(cpu);
+       struct task_struct *curr;
        struct rq_flags rf;
+       u64 delta;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3122,24 +3085,28 @@ static void sched_tick_remote(struct work_struct *work)
         * statistics and checks timeslices in a time-independent way, regardless
         * of when exactly it is running.
         */
-       if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
-               struct task_struct *curr;
-               u64 delta;
+       if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+               goto out_requeue;
 
-               rq_lock_irq(rq, &rf);
-               update_rq_clock(rq);
-               curr = rq->curr;
-               delta = rq_clock_task(rq) - curr->se.exec_start;
+       rq_lock_irq(rq, &rf);
+       curr = rq->curr;
+       if (is_idle_task(curr))
+               goto out_unlock;
 
-               /*
-                * Make sure the next tick runs within a reasonable
-                * amount of time.
-                */
-               WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-               curr->sched_class->task_tick(rq, curr, 0);
-               rq_unlock_irq(rq, &rf);
-       }
+       update_rq_clock(rq);
+       delta = rq_clock_task(rq) - curr->se.exec_start;
 
+       /*
+        * Make sure the next tick runs within a reasonable
+        * amount of time.
+        */
+       WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+       curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+       rq_unlock_irq(rq, &rf);
+
+out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough
@@ -5717,13 +5684,6 @@ void set_rq_offline(struct rq *rq)
        }
 }
 
-static void set_cpu_rq_start_time(unsigned int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-
-       rq->age_stamp = sched_clock_cpu(cpu);
-}
-
 /*
  * used to mark begin/end of suspend/resume:
  */
@@ -5841,7 +5801,6 @@ static void sched_rq_cpu_starting(unsigned int cpu)
 
 int sched_cpu_starting(unsigned int cpu)
 {
-       set_cpu_rq_start_time(cpu);
        sched_rq_cpu_starting(cpu);
        sched_tick_start(cpu);
        return 0;
@@ -5957,7 +5916,6 @@ void __init sched_init(void)
        int i, j;
        unsigned long alloc_size = 0, ptr;
 
-       sched_clock_init();
        wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6109,7 +6067,6 @@ void __init sched_init(void)
 
 #ifdef CONFIG_SMP
        idle_thread_set_boot_cpu();
-       set_cpu_rq_start_time(smp_processor_id());
 #endif
        init_sched_fair_class();
 
@@ -6788,6 +6745,16 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
 
+       if (schedstat_enabled() && tg != &root_task_group) {
+               u64 ws = 0;
+               int i;
+
+               for_each_possible_cpu(i)
+                       ws += schedstat_val(tg->se[i]->statistics.wait_sum);
+
+               seq_printf(sf, "wait_sum %llu\n", ws);
+       }
+
        return 0;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
index 3cde46483f0aa5e57ea14502322fd90741e88ac0..3fffad3bc8a86a366b93d5480c8840f34a84121e 100644 (file)
@@ -53,9 +53,7 @@ struct sugov_cpu {
        unsigned int            iowait_boost_max;
        u64                     last_update;
 
-       /* The fields below are only needed when sharing a policy: */
-       unsigned long           util_cfs;
-       unsigned long           util_dl;
+       unsigned long           bw_dl;
        unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
@@ -179,33 +177,90 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
        return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ *   cpu_util_{cfs,rt,dl,irq}()
+ *   cpu_bw_dl()
+ *
+ * Where the cfs, rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs, rt and dl utilizations are the running times measured with
+ * rq->clock_task, which excludes things like IRQ and steal time; the latter
+ * are accrued in the irq utilization.
+ *
+ * The DL bandwidth number, OTOH, is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
+       unsigned long util, irq, max;
 
-       sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
-       sg_cpu->util_cfs = cpu_util_cfs(rq);
-       sg_cpu->util_dl  = cpu_util_dl(rq);
-}
+       sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+       sg_cpu->bw_dl = cpu_bw_dl(rq);
 
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
-       struct rq *rq = cpu_rq(sg_cpu->cpu);
+       if (rt_rq_is_runnable(&rq->rt))
+               return max;
 
-       if (rq->rt.rt_nr_running)
-               return sg_cpu->max;
+       /*
+        * Early check to see if IRQ/steal time saturates the CPU, can be
+        * because of inaccuracies in how we track these -- see
+        * update_irq_load_avg().
+        */
+       irq = cpu_util_irq(rq);
+       if (unlikely(irq >= max))
+               return max;
+
+       /*
+        * Because the time spent on RT/DL tasks is visible as 'lost' time to
+        * CFS tasks and we use the same metric to track the effective
+        * utilization (PELT windows are synchronized) we can directly add them
+        * to obtain the CPU's actual utilization.
+        */
+       util = cpu_util_cfs(rq);
+       util += cpu_util_rt(rq);
+
+       /*
+        * We do not make cpu_util_dl() a permanent part of this sum because we
+        * want to use cpu_bw_dl() later on, but we need to check if the
+        * CFS+RT+DL sum is saturated (i.e. no idle time) such that we select
+        * f_max when there is no idle time.
+        *
+        * NOTE: numerical errors or stop class might cause us to not quite hit
+        * saturation when we should -- something for later.
+        */
+       if ((util + cpu_util_dl(rq)) >= max)
+               return max;
+
+       /*
+        * There is still idle time; further improve the number by using the
+        * irq metric. Because IRQ/steal time is hidden from the task clock we
+        * need to scale the task numbers:
+        *
+        *              1 - irq
+        *   U' = irq + ------- * U
+        *                max
+        */
+       util = scale_irq_capacity(util, irq, max);
+       util += irq;
 
        /*
-        * Utilization required by DEADLINE must always be granted while, for
-        * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
-        * gracefully reduce the frequency when no tasks show up for longer
+        * Bandwidth required by DEADLINE must always be granted while, for
+        * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+        * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
-        * Ideally we would like to set util_dl as min/guaranteed freq and
-        * util_cfs + util_dl as requested freq. However, cpufreq is not yet
-        * ready for such an interface. So, we only do the latter for now.
+        * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+        * bw_dl as requested freq. However, cpufreq is not yet ready for such
+        * an interface. So, we only do the latter for now.
         */
-       return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
+       return min(max, util + sg_cpu->bw_dl);
 }
 
 /**
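Worked numbers for the scaling comment above, assuming scale_irq_capacity() computes util * (max - irq) / max as in kernel/sched/sched.h:

#include <stdio.h>

int main(void)
{
        unsigned long max = 1024, irq = 256, util = 512;

        /* U' = irq + U * (max - irq) / max */
        util = util * (max - irq) / max;        /* 512 * 768/1024 = 384 */
        util += irq;                            /* 384 + 256 = 640 */
        printf("effective util = %lu of %lu\n", util, max);
        return 0;
}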
@@ -360,7 +415,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
  */
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
-       if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+       if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->need_freq_update = true;
 }
 
@@ -383,9 +438,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
        busy = sugov_cpu_is_busy(sg_cpu);
 
-       sugov_get_util(sg_cpu);
+       util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
-       util = sugov_aggregate_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time, &util, &max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
@@ -424,9 +478,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
 
-               sugov_get_util(j_sg_cpu);
+               j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
-               j_util = sugov_aggregate_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
 
                if (j_util * max > j_max * util) {
index fbfc3f1d368a08dd9ebd7c510caf67f52d377334..997ea7b839fa048fece2738fa2a7a38472ca4f01 100644 (file)
@@ -16,6 +16,7 @@
  *                    Fabio Checconi <fchecconi@gmail.com>
  */
 #include "sched.h"
+#include "pelt.h"
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1179,8 +1180,6 @@ static void update_curr_dl(struct rq *rq)
        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);
 
-       sched_rt_avg_update(rq, delta_exec);
-
        if (dl_entity_is_special(dl_se))
                return;
 
@@ -1761,6 +1760,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        deadline_queue_push_tasks(rq);
 
+       if (rq->curr->sched_class != &dl_sched_class)
+               update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
        return p;
 }
 
@@ -1768,6 +1770,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
        update_curr_dl(rq);
 
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
@@ -1784,6 +1787,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
        update_curr_dl(rq);
 
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
        /*
         * Even when we have runtime, update_curr_dl() might have resulted in us
         * not being the leftmost task anymore. In that case NEED_RESCHED will
@@ -2090,8 +2094,14 @@ retry:
        sub_rq_bw(&next_task->dl, &rq->dl);
        set_task_cpu(next_task, later_rq->cpu);
        add_rq_bw(&next_task->dl, &later_rq->dl);
+
+       /*
+        * Update the later_rq clock here, because the clock is used
+        * by the cpufreq_update_util() inside __add_running_bw().
+        */
+       update_rq_clock(later_rq);
        add_running_bw(&next_task->dl, &later_rq->dl);
-       activate_task(later_rq, next_task, 0);
+       activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
        ret = 1;
 
        resched_curr(later_rq);
@@ -2290,8 +2300,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
        if (task_on_rq_queued(p) && p->dl.dl_runtime)
                task_non_contending(p);
 
-       if (!task_on_rq_queued(p))
+       if (!task_on_rq_queued(p)) {
+               /*
+                * Inactive timer is armed. However, p is leaving DEADLINE and
+                * might migrate away from this rq while continuing to run on
+                * some other class. We need to remove its contribution from
+                * this rq running_bw now, or sub_rq_bw (below) will complain.
+                */
+               if (p->dl.dl_non_contending)
+                       sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);
+       }
 
        /*
         * We cannot use inactive_task_timer() to invoke sub_running_bw()
index e593b4118578c9aadaaefe20111520b3525c1ebf..60caf1fb94e05778067a06200ae1a66a255f75bb 100644 (file)
@@ -111,20 +111,19 @@ static int sched_feat_set(char *cmp)
                cmp += 3;
        }
 
-       for (i = 0; i < __SCHED_FEAT_NR; i++) {
-               if (strcmp(cmp, sched_feat_names[i]) == 0) {
-                       if (neg) {
-                               sysctl_sched_features &= ~(1UL << i);
-                               sched_feat_disable(i);
-                       } else {
-                               sysctl_sched_features |= (1UL << i);
-                               sched_feat_enable(i);
-                       }
-                       break;
-               }
+       i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
+       if (i < 0)
+               return i;
+
+       if (neg) {
+               sysctl_sched_features &= ~(1UL << i);
+               sched_feat_disable(i);
+       } else {
+               sysctl_sched_features |= (1UL << i);
+               sched_feat_enable(i);
        }
 
-       return i;
+       return 0;
 }
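match_string() (lib/string.c) returns the index of the matching array entry or -EINVAL, which is what sched_feat_set() now propagates straight to the writer. A user-space model with hypothetical feature names, not the real sched_feat_names[]:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Model of match_string(): index of the first matching entry, or
 * -EINVAL when none matches. */
static int match_string(const char * const *array, size_t n, const char *string)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (array[i] && strcmp(array[i], string) == 0)
                        return (int)i;
        return -EINVAL;
}

int main(void)
{
        static const char * const names[] = { "FEAT_A", "FEAT_B", "FEAT_C" };

        printf("%d %d\n", match_string(names, 3, "FEAT_B"),     /* 1 */
               match_string(names, 3, "NO_SUCH"));              /* -EINVAL */
        return 0;
}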
 
 static ssize_t
@@ -133,7 +132,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 {
        char buf[64];
        char *cmp;
-       int i;
+       int ret;
        struct inode *inode;
 
        if (cnt > 63)
@@ -148,10 +147,10 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        inode_lock(inode);
-       i = sched_feat_set(cmp);
+       ret = sched_feat_set(cmp);
        inode_unlock(inode);
-       if (i == __SCHED_FEAT_NR)
-               return -EINVAL;
+       if (ret < 0)
+               return ret;
 
        *ppos += cnt;
 
@@ -623,8 +622,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }
 
-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -843,8 +840,8 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
 {
        SEQ_printf(m, "numa_faults node=%d ", node);
-       SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
-       SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+       SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
+       SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
 }
 #endif
 
index 1866e64792a791f8737128c88ae691d7453ff117..309c93fcc604784aa77c22c143ba1f097c917bfb 100644 (file)
@@ -255,9 +255,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
        return cfs_rq->rq;
 }
 
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se)     (!se->my_q)
-
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
        SCHED_WARN_ON(!entity_is_task(se));
@@ -419,7 +416,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
        return container_of(cfs_rq, struct rq, cfs);
 }
 
-#define entity_is_task(se)     1
 
 #define for_each_sched_entity(se) \
                for (; se; se = NULL)
@@ -692,7 +688,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #ifdef CONFIG_SMP
-
+#include "pelt.h"
 #include "sched-pelt.h"
 
 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
@@ -735,11 +731,12 @@ static void attach_entity_cfs_rq(struct sched_entity *se);
  * To solve this problem, we also cap the util_avg of successive tasks to
  * only 1/2 of the left utilization budget:
  *
- *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
  *
- * where n denotes the nth task.
+ * where n denotes the nth task and cpu_scale the CPU capacity.
  *
- * For example, a simplest series from the beginning would be like:
+ * For example, for a CPU with a capacity of 1024, the simplest series
+ * from the beginning would be like:
  *
  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
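The series in the comment above can be reproduced directly: each new task is capped to half of the utilization budget left on the CPU. A sketch assuming cpu_scale = 1024:

#include <stdio.h>

int main(void)
{
        long cpu_scale = 1024, cfs_util = 0;
        int n;

        for (n = 0; n < 7; n++) {
                long cap = (cpu_scale - cfs_util) / 2;

                /* Prints the 512, 256, 128, ... / 512, 768, 896, ... series. */
                printf("task %d util_avg=%ld cfs_rq=%ld\n", n, cap,
                       cfs_util + cap);
                cfs_util += cap;
        }
        return 0;
}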
@@ -751,7 +748,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
-       long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+       long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+       long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
 
        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
@@ -1314,7 +1312,7 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
                 * of each group. Skip other nodes.
                 */
                if (sched_numa_topology_type == NUMA_BACKPLANE &&
-                                       dist > maxdist)
+                                       dist >= maxdist)
                        continue;
 
                /* Add up the faults from nearby nodes. */
@@ -1452,15 +1450,12 @@ static unsigned long capacity_of(int cpu);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
-       unsigned long nr_running;
        unsigned long load;
 
        /* Total compute capacity of CPUs on a node */
        unsigned long compute_capacity;
 
-       /* Approximate capacity in terms of runnable tasks on a node */
-       unsigned long task_capacity;
-       int has_free_capacity;
+       unsigned int nr_running;
 };
 
 /*
@@ -1487,8 +1482,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
-        * We'll either bail at !has_free_capacity, or we'll detect a huge
-        * imbalance and bail there.
+        * We'll detect a huge imbalance and bail there.
         */
        if (!cpus)
                return;
@@ -1497,9 +1491,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
        smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
        capacity = cpus / smt; /* cores */
 
-       ns->task_capacity = min_t(unsigned, capacity,
+       capacity = min_t(unsigned, capacity,
                DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
-       ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
 }
 
 struct task_numa_env {
@@ -1548,28 +1541,12 @@ static bool load_too_imbalanced(long src_load, long dst_load,
        src_capacity = env->src_stats.compute_capacity;
        dst_capacity = env->dst_stats.compute_capacity;
 
-       /* We care about the slope of the imbalance, not the direction. */
-       if (dst_load < src_load)
-               swap(dst_load, src_load);
+       imb = abs(dst_load * src_capacity - src_load * dst_capacity);
 
-       /* Is the difference below the threshold? */
-       imb = dst_load * src_capacity * 100 -
-             src_load * dst_capacity * env->imbalance_pct;
-       if (imb <= 0)
-               return false;
-
-       /*
-        * The imbalance is above the allowed threshold.
-        * Compare it with the old imbalance.
-        */
        orig_src_load = env->src_stats.load;
        orig_dst_load = env->dst_stats.load;
 
-       if (orig_dst_load < orig_src_load)
-               swap(orig_dst_load, orig_src_load);
-
-       old_imb = orig_dst_load * src_capacity * 100 -
-                 orig_src_load * dst_capacity * env->imbalance_pct;
+       old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
 
        /* Would this change make things worse? */
        return (imb > old_imb);
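The rewritten check compares the two load/capacity ratios by cross-multiplying, avoiding divisions: |dst_load/dst_capacity - src_load/src_capacity| ranks the same as |dst_load*src_capacity - src_load*dst_capacity|. A sketch with illustrative numbers:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        long src_load = 300, dst_load = 500;
        long src_capacity = 1024, dst_capacity = 1024;

        /* Equal capacities reduce the metric to 1024 * |load delta|. */
        long imb = labs(dst_load * src_capacity - src_load * dst_capacity);

        printf("imbalance metric: %ld\n", imb); /* 204800 */
        return 0;
}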
@@ -1582,9 +1559,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
  * be exchanged with the source task
  */
 static void task_numa_compare(struct task_numa_env *env,
-                             long taskimp, long groupimp)
+                             long taskimp, long groupimp, bool maymove)
 {
-       struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
        long src_load, dst_load;
@@ -1605,97 +1581,73 @@ static void task_numa_compare(struct task_numa_env *env,
        if (cur == env->p)
                goto unlock;
 
+       if (!cur) {
+               if (maymove || imp > env->best_imp)
+                       goto assign;
+               else
+                       goto unlock;
+       }
+
        /*
         * "imp" is the fault differential for the source task between the
         * source and destination node. Calculate the total differential for
         * the source task and potential destination task. The more negative
-        * the value is, the more rmeote accesses that would be expected to
+        * the value is, the more remote accesses that would be expected to
         * be incurred if the tasks were swapped.
         */
-       if (cur) {
-               /* Skip this swap candidate if cannot move to the source CPU: */
-               if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
-                       goto unlock;
+       /* Skip this swap candidate if cannot move to the source cpu */
+       if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+               goto unlock;
 
+       /*
+        * If dst and source tasks are in the same NUMA group, or not
+        * in any group then look only at task weights.
+        */
+       if (cur->numa_group == env->p->numa_group) {
+               imp = taskimp + task_weight(cur, env->src_nid, dist) -
+                     task_weight(cur, env->dst_nid, dist);
                /*
-                * If dst and source tasks are in the same NUMA group, or not
-                * in any group then look only at task weights.
+                * Add some hysteresis to prevent swapping the
+                * tasks within a group over tiny differences.
                 */
-               if (cur->numa_group == env->p->numa_group) {
-                       imp = taskimp + task_weight(cur, env->src_nid, dist) -
-                             task_weight(cur, env->dst_nid, dist);
-                       /*
-                        * Add some hysteresis to prevent swapping the
-                        * tasks within a group over tiny differences.
-                        */
-                       if (cur->numa_group)
-                               imp -= imp/16;
-               } else {
-                       /*
-                        * Compare the group weights. If a task is all by
-                        * itself (not part of a group), use the task weight
-                        * instead.
-                        */
-                       if (cur->numa_group)
-                               imp += group_weight(cur, env->src_nid, dist) -
-                                      group_weight(cur, env->dst_nid, dist);
-                       else
-                               imp += task_weight(cur, env->src_nid, dist) -
-                                      task_weight(cur, env->dst_nid, dist);
-               }
+               if (cur->numa_group)
+                       imp -= imp / 16;
+       } else {
+               /*
+                * Compare the group weights. If a task is all by itself
+                * (not part of a group), use the task weight instead.
+                */
+               if (cur->numa_group && env->p->numa_group)
+                       imp += group_weight(cur, env->src_nid, dist) -
+                              group_weight(cur, env->dst_nid, dist);
+               else
+                       imp += task_weight(cur, env->src_nid, dist) -
+                              task_weight(cur, env->dst_nid, dist);
        }
 
-       if (imp <= env->best_imp && moveimp <= env->best_imp)
+       if (imp <= env->best_imp)
                goto unlock;
 
-       if (!cur) {
-               /* Is there capacity at our destination? */
-               if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
-                   !env->dst_stats.has_free_capacity)
-                       goto unlock;
-
-               goto balance;
-       }
-
-       /* Balance doesn't matter much if we're running a task per CPU: */
-       if (imp > env->best_imp && src_rq->nr_running == 1 &&
-                       dst_rq->nr_running == 1)
+       if (maymove && moveimp > imp && moveimp > env->best_imp) {
+               imp = moveimp - 1;
+               cur = NULL;
                goto assign;
+       }
 
        /*
         * In the overloaded case, try and keep the load balanced.
         */
-balance:
-       load = task_h_load(env->p);
+       load = task_h_load(env->p) - task_h_load(cur);
+       if (!load)
+               goto assign;
+
        dst_load = env->dst_stats.load + load;
        src_load = env->src_stats.load - load;
 
-       if (moveimp > imp && moveimp > env->best_imp) {
-               /*
-                * If the improvement from just moving env->p direction is
-                * better than swapping tasks around, check if a move is
-                * possible. Store a slightly smaller score than moveimp,
-                * so an actually idle CPU will win.
-                */
-               if (!load_too_imbalanced(src_load, dst_load, env)) {
-                       imp = moveimp - 1;
-                       cur = NULL;
-                       goto assign;
-               }
-       }
-
-       if (imp <= env->best_imp)
-               goto unlock;
-
-       if (cur) {
-               load = task_h_load(cur);
-               dst_load -= load;
-               src_load += load;
-       }
-
        if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;
 
+assign:
        /*
         * One idle CPU per node is evaluated for a task numa move.
         * Call select_idle_sibling to maybe find a better one.
@@ -1711,7 +1663,6 @@ balance:
                local_irq_enable();
        }
 
-assign:
        task_numa_assign(env, cur, imp);
 unlock:
        rcu_read_unlock();
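
The reshuffled flow above is easier to see outside the hunk. Below is a minimal userspace sketch of the new decision order only (hypothetical struct and stub values; the kernel's weight recomputation and load_too_imbalanced() checks are elided): an empty destination is taken when a plain move is allowed, a swap is scored by imp, and a permitted move that beats the swap wins with a slightly smaller score so a truly idle CPU still prevails.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's task_numa_env bookkeeping. */
struct env { long best_imp; };

static const char *numa_decide(struct env *env, bool have_cur,
			       long imp, long moveimp, bool maymove)
{
	/* 1) destination CPU runs nothing: a plain move is enough */
	if (!have_cur)
		return (maymove || imp > env->best_imp) ? "assign move" : "skip";

	/* 2) the swap must improve on the best candidate so far */
	if (imp <= env->best_imp)
		return "skip";

	/* 3) a permitted move that beats the swap wins; the kernel stores
	 * imp = moveimp - 1 so an actually idle CPU would still win */
	if (maymove && moveimp > imp && moveimp > env->best_imp)
		return "assign move (imp = moveimp - 1)";

	return "assign swap";
}

int main(void)
{
	struct env env = { .best_imp = 0 };

	printf("%s\n", numa_decide(&env, false, -5, 0, true));
	printf("%s\n", numa_decide(&env, true, 10, 30, true));
	printf("%s\n", numa_decide(&env, true, 10, 5, true));
	return 0;
}
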
@@ -1720,43 +1671,30 @@ unlock:
 static void task_numa_find_cpu(struct task_numa_env *env,
                                long taskimp, long groupimp)
 {
+       long src_load, dst_load, load;
+       bool maymove = false;
        int cpu;
 
+       load = task_h_load(env->p);
+       dst_load = env->dst_stats.load + load;
+       src_load = env->src_stats.load - load;
+
+       /*
+        * If the improvement from just moving env->p direction is better
+        * than swapping tasks around, check if a move is possible.
+        */
+       maymove = !load_too_imbalanced(src_load, dst_load, env);
+
        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                /* Skip this CPU if the source task cannot migrate */
                if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
                        continue;
 
                env->dst_cpu = cpu;
-               task_numa_compare(env, taskimp, groupimp);
+               task_numa_compare(env, taskimp, groupimp, maymove);
        }
 }
 
-/* Only move tasks to a NUMA node less busy than the current node. */
-static bool numa_has_capacity(struct task_numa_env *env)
-{
-       struct numa_stats *src = &env->src_stats;
-       struct numa_stats *dst = &env->dst_stats;
-
-       if (src->has_free_capacity && !dst->has_free_capacity)
-               return false;
-
-       /*
-        * Only consider a task move if the source has a higher load
-        * than the destination, corrected for CPU capacity on each node.
-        *
-        *      src->load                dst->load
-        * --------------------- vs ---------------------
-        * src->compute_capacity    dst->compute_capacity
-        */
-       if (src->load * dst->compute_capacity * env->imbalance_pct >
-
-           dst->load * src->compute_capacity * 100)
-               return true;
-
-       return false;
-}
-
 static int task_numa_migrate(struct task_struct *p)
 {
        struct task_numa_env env = {
@@ -1797,7 +1735,7 @@ static int task_numa_migrate(struct task_struct *p)
         * elsewhere, so there is no point in (re)trying.
         */
        if (unlikely(!sd)) {
-               p->numa_preferred_nid = task_node(p);
+               sched_setnuma(p, task_node(p));
                return -EINVAL;
        }
 
@@ -1811,8 +1749,7 @@ static int task_numa_migrate(struct task_struct *p)
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
        /* Try to find a spot on the preferred nid. */
-       if (numa_has_capacity(&env))
-               task_numa_find_cpu(&env, taskimp, groupimp);
+       task_numa_find_cpu(&env, taskimp, groupimp);
 
        /*
         * Look at other nodes in these cases:
@@ -1842,8 +1779,7 @@ static int task_numa_migrate(struct task_struct *p)
                        env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
-                       if (numa_has_capacity(&env))
-                               task_numa_find_cpu(&env, taskimp, groupimp);
+                       task_numa_find_cpu(&env, taskimp, groupimp);
                }
        }
 
@@ -1856,15 +1792,13 @@ static int task_numa_migrate(struct task_struct *p)
         * trying for a better one later. Do not set the preferred node here.
         */
        if (p->numa_group) {
-               struct numa_group *ng = p->numa_group;
-
                if (env.best_cpu == -1)
                        nid = env.src_nid;
                else
-                       nid = env.dst_nid;
+                       nid = cpu_to_node(env.best_cpu);
 
-               if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
-                       sched_setnuma(p, env.dst_nid);
+               if (nid != p->numa_preferred_nid)
+                       sched_setnuma(p, nid);
        }
 
        /* No better CPU than the current one was found. */
@@ -1884,7 +1818,8 @@ static int task_numa_migrate(struct task_struct *p)
                return ret;
        }
 
-       ret = migrate_swap(p, env.best_task);
+       ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
        put_task_struct(env.best_task);
@@ -2144,8 +2079,8 @@ static int preferred_group_nid(struct task_struct *p, int nid)
 
 static void task_numa_placement(struct task_struct *p)
 {
-       int seq, nid, max_nid = -1, max_group_nid = -1;
-       unsigned long max_faults = 0, max_group_faults = 0;
+       int seq, nid, max_nid = -1;
+       unsigned long max_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
        unsigned long total_faults;
        u64 runtime, period;
@@ -2224,33 +2159,30 @@ static void task_numa_placement(struct task_struct *p)
                        }
                }
 
-               if (faults > max_faults) {
-                       max_faults = faults;
+               if (!p->numa_group) {
+                       if (faults > max_faults) {
+                               max_faults = faults;
+                               max_nid = nid;
+                       }
+               } else if (group_faults > max_faults) {
+                       max_faults = group_faults;
                        max_nid = nid;
                }
-
-               if (group_faults > max_group_faults) {
-                       max_group_faults = group_faults;
-                       max_group_nid = nid;
-               }
        }
 
-       update_task_scan_period(p, fault_types[0], fault_types[1]);
-
        if (p->numa_group) {
                numa_group_count_active_nodes(p->numa_group);
                spin_unlock_irq(group_lock);
-               max_nid = preferred_group_nid(p, max_group_nid);
+               max_nid = preferred_group_nid(p, max_nid);
        }
 
        if (max_faults) {
                /* Set the new preferred node */
                if (max_nid != p->numa_preferred_nid)
                        sched_setnuma(p, max_nid);
-
-               if (task_node(p) != p->numa_preferred_nid)
-                       numa_migrate_preferred(p);
        }
+
+       update_task_scan_period(p, fault_types[0], fault_types[1]);
 }
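
The folded max_faults bookkeeping above can be checked with a small sketch: a task outside a numa_group tracks its own per-node faults, a grouped task tracks the group's, and a single maximum replaces the old max_faults/max_group_faults pair. The fault tables below are made-up numbers, not the kernel's p->numa_faults layout.

#include <stdio.h>

#define NR_NODES 4

/* Hypothetical fault tables; the kernel derives these from p->numa_faults. */
static unsigned long task_faults_of[NR_NODES]  = { 10, 40, 5, 20 };
static unsigned long group_faults_of[NR_NODES] = { 90, 40, 5, 20 };

/* Mirrors the folded loop: one maximum tracks either task or group faults. */
static int pick_max_nid(int in_group)
{
	unsigned long max_faults = 0;
	int nid, max_nid = -1;

	for (nid = 0; nid < NR_NODES; nid++) {
		unsigned long faults = in_group ? group_faults_of[nid]
						: task_faults_of[nid];
		if (faults > max_faults) {
			max_faults = faults;
			max_nid = nid;
		}
	}
	return max_nid;
}

int main(void)
{
	printf("no group -> nid %d\n", pick_max_nid(0));
	printf("grouped  -> nid %d\n", pick_max_nid(1));
	return 0;
}
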
 
 static inline int get_numa_group(struct numa_group *grp)
@@ -2450,14 +2382,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
                                numa_is_active_node(mem_node, ng))
                local = 1;
 
-       task_numa_placement(p);
-
        /*
 	 * Retry task to preferred node migration periodically, in case it
 	 * previously failed, or the scheduler moved us.
         */
-       if (time_after(jiffies, p->numa_migrate_retry))
+       if (time_after(jiffies, p->numa_migrate_retry)) {
+               task_numa_placement(p);
                numa_migrate_preferred(p);
+       }
 
        if (migrated)
                p->numa_pages_migrated += pages;
@@ -2749,19 +2681,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 } while (0)
 
 #ifdef CONFIG_SMP
-/*
- * XXX we want to get rid of these helpers and use the full load resolution.
- */
-static inline long se_weight(struct sched_entity *se)
-{
-       return scale_load_down(se->load.weight);
-}
-
-static inline long se_runnable(struct sched_entity *se)
-{
-       return scale_load_down(se->runnable_weight);
-}
-
 static inline void
 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -3062,314 +2981,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 }
 
 #ifdef CONFIG_SMP
-/*
- * Approximate:
- *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
- */
-static u64 decay_load(u64 val, u64 n)
-{
-       unsigned int local_n;
-
-       if (unlikely(n > LOAD_AVG_PERIOD * 63))
-               return 0;
-
-       /* after bounds checking we can collapse to 32-bit */
-       local_n = n;
-
-       /*
-        * As y^PERIOD = 1/2, we can combine
-        *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
-        * With a look-up table which covers y^n (n<PERIOD)
-        *
-        * To achieve constant time decay_load.
-        */
-       if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
-               val >>= local_n / LOAD_AVG_PERIOD;
-               local_n %= LOAD_AVG_PERIOD;
-       }
-
-       val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
-       return val;
-}
-
-static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
-{
-       u32 c1, c2, c3 = d3; /* y^0 == 1 */
-
-       /*
-        * c1 = d1 y^p
-        */
-       c1 = decay_load((u64)d1, periods);
-
-       /*
-        *            p-1
-        * c2 = 1024 \Sum y^n
-        *            n=1
-        *
-        *              inf        inf
-        *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
-        *              n=0        n=p
-        */
-       c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
-
-       return c1 + c2 + c3;
-}
-
-/*
- * Accumulate the three separate parts of the sum; d1 the remainder
- * of the last (incomplete) period, d2 the span of full periods and d3
- * the remainder of the (incomplete) current period.
- *
- *           d1          d2           d3
- *           ^           ^            ^
- *           |           |            |
- *         |<->|<----------------->|<--->|
- * ... |---x---|------| ... |------|-----x (now)
- *
- *                           p-1
- * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
- *                           n=1
- *
- *    = u y^p +                                        (Step 1)
- *
- *                     p-1
- *      d1 y^p + 1024 \Sum y^n + d3 y^0                (Step 2)
- *                     n=1
- */
-static __always_inline u32
-accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
-              unsigned long load, unsigned long runnable, int running)
-{
-       unsigned long scale_freq, scale_cpu;
-       u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
-       u64 periods;
-
-       scale_freq = arch_scale_freq_capacity(cpu);
-       scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
-
-       delta += sa->period_contrib;
-       periods = delta / 1024; /* A period is 1024us (~1ms) */
-
-       /*
-        * Step 1: decay old *_sum if we crossed period boundaries.
-        */
-       if (periods) {
-               sa->load_sum = decay_load(sa->load_sum, periods);
-               sa->runnable_load_sum =
-                       decay_load(sa->runnable_load_sum, periods);
-               sa->util_sum = decay_load((u64)(sa->util_sum), periods);
-
-               /*
-                * Step 2
-                */
-               delta %= 1024;
-               contrib = __accumulate_pelt_segments(periods,
-                               1024 - sa->period_contrib, delta);
-       }
-       sa->period_contrib = delta;
-
-       contrib = cap_scale(contrib, scale_freq);
-       if (load)
-               sa->load_sum += load * contrib;
-       if (runnable)
-               sa->runnable_load_sum += runnable * contrib;
-       if (running)
-               sa->util_sum += contrib * scale_cpu;
-
-       return periods;
-}
-
-/*
- * We can represent the historical contribution to runnable average as the
- * coefficients of a geometric series.  To do this we sub-divide our runnable
- * history into segments of approximately 1ms (1024us); label the segment that
- * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
- *
- * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
- *      p0            p1           p2
- *     (now)       (~1ms ago)  (~2ms ago)
- *
- * Let u_i denote the fraction of p_i that the entity was runnable.
- *
- * We then designate the fractions u_i as our co-efficients, yielding the
- * following representation of historical load:
- *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
- *
- * We choose y based on the with of a reasonably scheduling period, fixing:
- *   y^32 = 0.5
- *
- * This means that the contribution to load ~32ms ago (u_32) will be weighted
- * approximately half as much as the contribution to load within the last ms
- * (u_0).
- *
- * When a period "rolls over" and we have new u_0`, multiplying the previous
- * sum again by y is sufficient to update:
- *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
- *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
- */
-static __always_inline int
-___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
-                 unsigned long load, unsigned long runnable, int running)
-{
-       u64 delta;
-
-       delta = now - sa->last_update_time;
-       /*
-        * This should only happen when time goes backwards, which it
-        * unfortunately does during sched clock init when we swap over to TSC.
-        */
-       if ((s64)delta < 0) {
-               sa->last_update_time = now;
-               return 0;
-       }
-
-       /*
-        * Use 1024ns as the unit of measurement since it's a reasonable
-        * approximation of 1us and fast to compute.
-        */
-       delta >>= 10;
-       if (!delta)
-               return 0;
-
-       sa->last_update_time += delta << 10;
-
-       /*
-        * running is a subset of runnable (weight) so running can't be set if
-        * runnable is clear. But there are some corner cases where the current
-        * se has been already dequeued but cfs_rq->curr still points to it.
-        * This means that weight will be 0 but not running for a sched_entity
-        * but also for a cfs_rq if the latter becomes idle. As an example,
-        * this happens during idle_balance() which calls
-        * update_blocked_averages()
-        */
-       if (!load)
-               runnable = running = 0;
-
-       /*
-        * Now we know we crossed measurement unit boundaries. The *_avg
-        * accrues by two steps:
-        *
-        * Step 1: accumulate *_sum since last_update_time. If we haven't
-        * crossed period boundaries, finish.
-        */
-       if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
-               return 0;
-
-       return 1;
-}
-
-static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
-{
-       u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
-
-       /*
-        * Step 2: update *_avg.
-        */
-       sa->load_avg = div_u64(load * sa->load_sum, divider);
-       sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
-       sa->util_avg = sa->util_sum / divider;
-}
-
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
-static inline void cfs_se_util_change(struct sched_avg *avg)
-{
-       unsigned int enqueued;
-
-       if (!sched_feat(UTIL_EST))
-               return;
-
-       /* Avoid store if the flag has been already set */
-       enqueued = avg->util_est.enqueued;
-       if (!(enqueued & UTIL_AVG_UNCHANGED))
-               return;
-
-       /* Reset flag to report util_avg has been updated */
-       enqueued &= ~UTIL_AVG_UNCHANGED;
-       WRITE_ONCE(avg->util_est.enqueued, enqueued);
-}
-
-/*
- * sched_entity:
- *
- *   task:
- *     se_runnable() == se_weight()
- *
- *   group: [ see update_cfs_group() ]
- *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
- *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
- *
- *   load_sum := runnable_sum
- *   load_avg = se_weight(se) * runnable_avg
- *
- *   runnable_load_sum := runnable_sum
- *   runnable_load_avg = se_runnable(se) * runnable_avg
- *
- * XXX collapse load_sum and runnable_load_sum
- *
- * cfq_rs:
- *
- *   load_sum = \Sum se_weight(se) * se->avg.load_sum
- *   load_avg = \Sum se->avg.load_avg
- *
- *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
- *   runnable_load_avg = \Sum se->avg.runable_load_avg
- */
-
-static int
-__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
-{
-       if (entity_is_task(se))
-               se->runnable_weight = se->load.weight;
-
-       if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
-               ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
-               return 1;
-       }
-
-       return 0;
-}
-
-static int
-__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-       if (entity_is_task(se))
-               se->runnable_weight = se->load.weight;
-
-       if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
-                               cfs_rq->curr == se)) {
-
-               ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
-               cfs_se_util_change(&se->avg);
-               return 1;
-       }
-
-       return 0;
-}
-
-static int
-__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
-{
-       if (___update_load_sum(now, cpu, &cfs_rq->avg,
-                               scale_load_down(cfs_rq->load.weight),
-                               scale_load_down(cfs_rq->runnable_weight),
-                               cfs_rq->curr != NULL)) {
-
-               ___update_load_avg(&cfs_rq->avg, 1, 1);
-               return 1;
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /**
  * update_tg_load_avg - update the tg's load avg
@@ -3982,18 +3593,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /*
-        * Update root cfs_rq's estimated utilization
-        *
-        * If *p is the last task then the root cfs_rq's estimated utilization
-        * of a CPU is 0 by definition.
-        */
-       ue.enqueued = 0;
-       if (cfs_rq->nr_running) {
-               ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-               ue.enqueued -= min_t(unsigned int, ue.enqueued,
-                                    (_task_util_est(p) | UTIL_AVG_UNCHANGED));
-       }
+       /* Update root cfs_rq's estimated utilization */
+       ue.enqueued  = cfs_rq->avg.util_est.enqueued;
+       ue.enqueued -= min_t(unsigned int, ue.enqueued,
+                            (_task_util_est(p) | UTIL_AVG_UNCHANGED));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
        /*
@@ -4045,12 +3648,6 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 
 #else /* CONFIG_SMP */
 
-static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
-{
-       return 0;
-}
-
 #define UPDATE_TG      0x0
 #define SKIP_AGE_LOAD  0x0
 #define DO_ATTACH      0x0
@@ -4590,6 +4187,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
        now = sched_clock_cpu(smp_processor_id());
        cfs_b->runtime = cfs_b->quota;
        cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4612,6 +4210,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        struct task_group *tg = cfs_rq->tg;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
        u64 amount = 0, min_amount, expires;
+       int expires_seq;
 
        /* note: this is a positive sum as runtime_remaining <= 0 */
        min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
@@ -4628,6 +4227,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
                        cfs_b->idle = 0;
                }
        }
+       expires_seq = cfs_b->expires_seq;
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -4637,8 +4237,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * spread between our sched_clock and the one on which runtime was
         * issued.
         */
-       if ((s64)(expires - cfs_rq->runtime_expires) > 0)
+       if (cfs_rq->expires_seq != expires_seq) {
+               cfs_rq->expires_seq = expires_seq;
                cfs_rq->runtime_expires = expires;
+       }
 
        return cfs_rq->runtime_remaining > 0;
 }
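
A compact model of the new expires_seq handshake, with hypothetical struct names standing in for struct cfs_bandwidth and struct cfs_rq: the global side bumps a sequence number on each refill, and the per-runqueue side adopts the new deadline only when the sequence changed, so sched_clock skew between CPUs can no longer be mistaken for a deadline extension.

#include <stdio.h>

/* Hypothetical miniature of the bandwidth state; the real fields live in
 * struct cfs_bandwidth / struct cfs_rq and are guarded by cfs_b->lock. */
struct bw   { int expires_seq; unsigned long long runtime_expires; };
struct rqbw { int expires_seq; unsigned long long runtime_expires; };

/* A new global period bumps the sequence instead of relying on timestamps. */
static void refill(struct bw *b, unsigned long long now, unsigned long long period)
{
	b->runtime_expires = now + period;
	b->expires_seq++;
}

/* The local runqueue picks up the new deadline only when the seq changed. */
static void sync_expiry(struct rqbw *rq, const struct bw *b)
{
	if (rq->expires_seq != b->expires_seq) {
		rq->expires_seq = b->expires_seq;
		rq->runtime_expires = b->runtime_expires;
	}
}

int main(void)
{
	struct bw b = { 0, 0 };
	struct rqbw rq = { 0, 0 };

	refill(&b, 1000, 100);
	sync_expiry(&rq, &b);
	printf("seq=%d expires=%llu\n", rq.expires_seq, rq.runtime_expires);
	return 0;
}
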
@@ -4664,12 +4266,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * has not truly expired.
         *
 	 * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced. It is valid to compare
-        * cfs_b->runtime_expires without any locks since we only care about
-        * exact equality, so a partial write will still work.
+	 * whether the global deadline (cfs_b->expires_seq) has advanced.
         */
-
-       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
+       if (cfs_rq->expires_seq == cfs_b->expires_seq) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@ -4732,7 +4331,6 @@ static inline int throttled_lb_pair(struct task_group *tg,
               throttled_hierarchy(dest_cfs_rq);
 }
 
-/* updated child weight may affect parent so we have to do this bottom up */
 static int tg_unthrottle_up(struct task_group *tg, void *data)
 {
        struct rq *rq = data;
@@ -5202,13 +4800,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       u64 overrun;
+
        lockdep_assert_held(&cfs_b->lock);
 
-       if (!cfs_b->period_active) {
-               cfs_b->period_active = 1;
-               hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
-               hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
-       }
+       if (cfs_b->period_active)
+               return;
+
+       cfs_b->period_active = 1;
+       overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+       cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
+       hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
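
The overrun arithmetic above advances the stale deadline past every missed period plus the freshly armed one. A quick check with made-up numbers:

#include <stdio.h>

/* If the period timer sat idle, hrtimer_forward_now() returns how many
 * whole periods were missed (overrun); the deadline must jump past all
 * of them plus the newly armed period. Values below are invented. */
int main(void)
{
	unsigned long long period  = 100000000ULL;  /* 100ms in ns */
	unsigned long long expires = 1000000000ULL; /* stale deadline */
	unsigned long long overrun = 3;             /* missed periods */

	expires += (overrun + 1) * period;
	printf("new runtime_expires = %llu\n", expires); /* 1400000000 */
	return 0;
}
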
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -5654,8 +5257,6 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 
                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }
-
-       sched_avg_update(this_rq);
 }
 
 /* Used instead of source_load when we know the type == 0 */
@@ -7295,8 +6896,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
-       unsigned long src_faults, dst_faults;
-       int src_nid, dst_nid;
+       unsigned long src_weight, dst_weight;
+       int src_nid, dst_nid, dist;
 
        if (!static_branch_likely(&sched_numa_balancing))
                return -1;
@@ -7323,18 +6924,19 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
                return 0;
 
        /* Leaving a core idle is often worse than degrading locality. */
-       if (env->idle != CPU_NOT_IDLE)
+       if (env->idle == CPU_IDLE)
                return -1;
 
+       dist = node_distance(src_nid, dst_nid);
        if (numa_group) {
-               src_faults = group_faults(p, src_nid);
-               dst_faults = group_faults(p, dst_nid);
+               src_weight = group_weight(p, src_nid, dist);
+               dst_weight = group_weight(p, dst_nid, dist);
        } else {
-               src_faults = task_faults(p, src_nid);
-               dst_faults = task_faults(p, dst_nid);
+               src_weight = task_weight(p, src_nid, dist);
+               dst_weight = task_weight(p, dst_nid, dist);
        }
 
-       return dst_faults < src_faults;
+       return dst_weight < src_weight;
 }
 
 #else
@@ -7621,6 +7223,22 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
        return false;
 }
 
+static inline bool others_have_blocked(struct rq *rq)
+{
+       if (READ_ONCE(rq->avg_rt.util_avg))
+               return true;
+
+       if (READ_ONCE(rq->avg_dl.util_avg))
+               return true;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+       if (READ_ONCE(rq->avg_irq.util_avg))
+               return true;
+#endif
+
+       return false;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7680,6 +7298,12 @@ static void update_blocked_averages(int cpu)
                if (cfs_rq_has_blocked(cfs_rq))
                        done = false;
        }
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_irq_load_avg(rq, 0);
+	/* No need for periodic decay once load/util_avg are zero */
+       if (others_have_blocked(rq))
+               done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
@@ -7745,9 +7369,12 @@ static inline void update_blocked_averages(int cpu)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
-       if (!cfs_rq_has_blocked(cfs_rq))
+       if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
                rq->has_blocked_load = 0;
 #endif
        rq_unlock_irqrestore(rq, &rf);
@@ -7857,39 +7484,32 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 static unsigned long scale_rt_capacity(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, used, age_stamp, avg;
-       s64 delta;
+       unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+       unsigned long used, free;
+       unsigned long irq;
 
-       /*
-        * Since we're reading these variables without serialization make sure
-        * we read them once before doing sanity checks on them.
-        */
-       age_stamp = READ_ONCE(rq->age_stamp);
-       avg = READ_ONCE(rq->rt_avg);
-       delta = __rq_clock_broken(rq) - age_stamp;
+       irq = cpu_util_irq(rq);
 
-       if (unlikely(delta < 0))
-               delta = 0;
+       if (unlikely(irq >= max))
+               return 1;
 
-       total = sched_avg_period() + delta;
+       used = READ_ONCE(rq->avg_rt.util_avg);
+       used += READ_ONCE(rq->avg_dl.util_avg);
 
-       used = div_u64(avg, total);
+       if (unlikely(used >= max))
+               return 1;
 
-       if (likely(used < SCHED_CAPACITY_SCALE))
-               return SCHED_CAPACITY_SCALE - used;
+       free = max - used;
 
-       return 1;
+       return scale_irq_capacity(free, irq, max);
 }
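
The rewritten scale_rt_capacity() composes three PELT signals instead of the old rt_avg ratio. A worked userspace version of the same arithmetic, with invented utilization values (max capacity 1024, irq 64, rt 128, dl 32):

#include <stdio.h>

/* Mirrors the new scale_rt_capacity() arithmetic with made-up PELT values. */
static unsigned long cfs_capacity(unsigned long max, unsigned long irq,
				  unsigned long rt, unsigned long dl)
{
	unsigned long used, free;

	if (irq >= max)
		return 1;

	used = rt + dl;
	if (used >= max)
		return 1;

	free = max - used;

	/* scale_irq_capacity(): time stolen by IRQ shrinks what is left */
	return free * (max - irq) / max;
}

int main(void)
{
	printf("%lu\n", cfs_capacity(1024, 64, 128, 32)); /* 810 */
	return 0;
}
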
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
+       unsigned long capacity = scale_rt_capacity(cpu);
        struct sched_group *sdg = sd->groups;
 
-       cpu_rq(cpu)->cpu_capacity_orig = capacity;
-
-       capacity *= scale_rt_capacity(cpu);
-       capacity >>= SCHED_CAPACITY_SHIFT;
+       cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
 
        if (!capacity)
                capacity = 1;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
new file mode 100644 (file)
index 0000000..35475c0
--- /dev/null
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Per Entity Load Tracking
+ *
+ *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ *  Interactivity improvements by Mike Galbraith
+ *  (C) 2007 Mike Galbraith <efault@gmx.de>
+ *
+ *  Various enhancements by Dmitry Adamushko.
+ *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ *  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  Copyright IBM Corporation, 2007
+ *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
+ *
+ *  Scaled math optimizations by Thomas Gleixner
+ *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ *
+ *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ *  Move PELT related code from fair.c into this pelt.c file
+ *  Author: Vincent Guittot <vincent.guittot@linaro.org>
+ */
+
+#include <linux/sched.h>
+#include "sched.h"
+#include "sched-pelt.h"
+#include "pelt.h"
+
+/*
+ * Approximate:
+ *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
+ */
+static u64 decay_load(u64 val, u64 n)
+{
+       unsigned int local_n;
+
+       if (unlikely(n > LOAD_AVG_PERIOD * 63))
+               return 0;
+
+       /* after bounds checking we can collapse to 32-bit */
+       local_n = n;
+
+       /*
+        * As y^PERIOD = 1/2, we can combine
+        *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+        * With a look-up table which covers y^n (n<PERIOD)
+        *
+        * To achieve constant time decay_load.
+        */
+       if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
+               val >>= local_n / LOAD_AVG_PERIOD;
+               local_n %= LOAD_AVG_PERIOD;
+       }
+
+       val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
+       return val;
+}
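
A floating-point stand-in for decay_load() makes the half-life visible: with y^32 = 0.5, decaying across 32 periods halves the value, and the kernel's runnable_avg_yN_inv[] table covers the fractional remainder. This sketch uses pow() purely for illustration; the real code is fixed point.

#include <math.h>
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_PERIOD 32

/* Floating-point stand-in for the kernel's fixed-point decay_load(). */
static uint64_t decay_load_approx(uint64_t val, uint64_t n)
{
	double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD); /* y^32 == 0.5 */

	if (n > LOAD_AVG_PERIOD * 63)
		return 0;

	/* shift for whole half-life multiples, "table" for the remainder */
	val >>= n / LOAD_AVG_PERIOD;
	return (uint64_t)(val * pow(y, (double)(n % LOAD_AVG_PERIOD)));
}

int main(void)
{
	printf("%llu\n", (unsigned long long)decay_load_approx(1024, 32)); /* 512 */
	printf("%llu\n", (unsigned long long)decay_load_approx(1024, 64)); /* 256 */
	return 0;
}
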
+
+static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
+{
+       u32 c1, c2, c3 = d3; /* y^0 == 1 */
+
+       /*
+        * c1 = d1 y^p
+        */
+       c1 = decay_load((u64)d1, periods);
+
+       /*
+        *            p-1
+        * c2 = 1024 \Sum y^n
+        *            n=1
+        *
+        *              inf        inf
+        *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
+        *              n=0        n=p
+        */
+       c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
+
+       return c1 + c2 + c3;
+}
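
The c2 expression relies on the closed form of the geometric series: 1024 * \Sum_{n>=0} y^n saturates at roughly LOAD_AVG_MAX, so subtracting the decayed tail and the y^0 term isolates the full periods between d1 and d3. A floating-point cross-check of that identity:

#include <math.h>
#include <stdio.h>

/* Cross-check of the c2 identity used above, in floating point:
 *   1024 * sum_{n=1..p-1} y^n  ==  S - decay(S, p) - 1024
 * where S = 1024 / (1 - y) is the infinite sum (~LOAD_AVG_MAX). */
int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);
	double S = 1024.0 / (1.0 - y);
	int p = 5;
	double direct = 0.0, closed;

	for (int n = 1; n <= p - 1; n++)
		direct += 1024.0 * pow(y, n);

	closed = S - S * pow(y, p) - 1024.0;

	printf("direct=%.2f closed=%.2f\n", direct, closed); /* equal */
	return 0;
}
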
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * Accumulate the three separate parts of the sum; d1 the remainder
+ * of the last (incomplete) period, d2 the span of full periods and d3
+ * the remainder of the (incomplete) current period.
+ *
+ *           d1          d2           d3
+ *           ^           ^            ^
+ *           |           |            |
+ *         |<->|<----------------->|<--->|
+ * ... |---x---|------| ... |------|-----x (now)
+ *
+ *                           p-1
+ * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
+ *                           n=1
+ *
+ *    = u y^p +                                        (Step 1)
+ *
+ *                     p-1
+ *      d1 y^p + 1024 \Sum y^n + d3 y^0                (Step 2)
+ *                     n=1
+ */
+static __always_inline u32
+accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
+              unsigned long load, unsigned long runnable, int running)
+{
+       unsigned long scale_freq, scale_cpu;
+       u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
+       u64 periods;
+
+       scale_freq = arch_scale_freq_capacity(cpu);
+       scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+       delta += sa->period_contrib;
+       periods = delta / 1024; /* A period is 1024us (~1ms) */
+
+       /*
+        * Step 1: decay old *_sum if we crossed period boundaries.
+        */
+       if (periods) {
+               sa->load_sum = decay_load(sa->load_sum, periods);
+               sa->runnable_load_sum =
+                       decay_load(sa->runnable_load_sum, periods);
+               sa->util_sum = decay_load((u64)(sa->util_sum), periods);
+
+               /*
+                * Step 2
+                */
+               delta %= 1024;
+               contrib = __accumulate_pelt_segments(periods,
+                               1024 - sa->period_contrib, delta);
+       }
+       sa->period_contrib = delta;
+
+       contrib = cap_scale(contrib, scale_freq);
+       if (load)
+               sa->load_sum += load * contrib;
+       if (runnable)
+               sa->runnable_load_sum += runnable * contrib;
+       if (running)
+               sa->util_sum += contrib * scale_cpu;
+
+       return periods;
+}
+
+/*
+ * We can represent the historical contribution to runnable average as the
+ * coefficients of a geometric series.  To do this we sub-divide our runnable
+ * history into segments of approximately 1ms (1024us); label the segment that
+ * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
+ *
+ * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
+ *      p0            p1           p2
+ *     (now)       (~1ms ago)  (~2ms ago)
+ *
+ * Let u_i denote the fraction of p_i that the entity was runnable.
+ *
+ * We then designate the fractions u_i as our co-efficients, yielding the
+ * following representation of historical load:
+ *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
+ *
+ * We choose y based on the width of a reasonable scheduling period, fixing:
+ *   y^32 = 0.5
+ *
+ * This means that the contribution to load ~32ms ago (u_32) will be weighted
+ * approximately half as much as the contribution to load within the last ms
+ * (u_0).
+ *
+ * When a period "rolls over" and we have new u_0`, multiplying the previous
+ * sum again by y is sufficient to update:
+ *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
+ *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
+ */
+static __always_inline int
+___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
+                 unsigned long load, unsigned long runnable, int running)
+{
+       u64 delta;
+
+       delta = now - sa->last_update_time;
+       /*
+        * This should only happen when time goes backwards, which it
+        * unfortunately does during sched clock init when we swap over to TSC.
+        */
+       if ((s64)delta < 0) {
+               sa->last_update_time = now;
+               return 0;
+       }
+
+       /*
+        * Use 1024ns as the unit of measurement since it's a reasonable
+        * approximation of 1us and fast to compute.
+        */
+       delta >>= 10;
+       if (!delta)
+               return 0;
+
+       sa->last_update_time += delta << 10;
+
+       /*
+        * running is a subset of runnable (weight) so running can't be set if
+        * runnable is clear. But there are some corner cases where the current
+	 * se has already been dequeued while cfs_rq->curr still points to it.
+	 * This means that weight can be 0 while running is not, both for a
+	 * sched_entity and for a cfs_rq if the latter becomes idle. As an
+	 * example, this happens during idle_balance(), which calls
+	 * update_blocked_averages().
+        */
+       if (!load)
+               runnable = running = 0;
+
+       /*
+        * Now we know we crossed measurement unit boundaries. The *_avg
+        * accrues by two steps:
+        *
+        * Step 1: accumulate *_sum since last_update_time. If we haven't
+        * crossed period boundaries, finish.
+        */
+       if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+               return 0;
+
+       return 1;
+}
+
+static __always_inline void
+___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
+{
+       u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+
+       /*
+        * Step 2: update *_avg.
+        */
+       sa->load_avg = div_u64(load * sa->load_sum, divider);
+       sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
+       WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+}
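
The divider above, LOAD_AVG_MAX - 1024 + period_contrib, is the largest value a *_sum can have reached given the partially filled current period, so the division normalizes *_avg to at most the supplied weight. A saturated example with an assumed scale_cpu of 1024:

#include <stdio.h>

#define LOAD_AVG_MAX 47742 /* maximum possible *_sum, from sched-pelt.h */

/* A fully-running entity: util_sum has hit its ceiling for the current
 * period_contrib, so util_avg must come out at 100% (1024). */
int main(void)
{
	unsigned int period_contrib = 300;
	unsigned long scale_cpu = 1024;
	unsigned long divider = LOAD_AVG_MAX - 1024 + period_contrib;
	unsigned long util_sum = divider * scale_cpu; /* saturated */

	printf("util_avg = %lu\n", util_sum / divider); /* 1024 */
	return 0;
}
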
+
+/*
+ * sched_entity:
+ *
+ *   task:
+ *     se_runnable() == se_weight()
+ *
+ *   group: [ see update_cfs_group() ]
+ *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
+ *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
+ *
+ *   load_sum := runnable_sum
+ *   load_avg = se_weight(se) * runnable_avg
+ *
+ *   runnable_load_sum := runnable_sum
+ *   runnable_load_avg = se_runnable(se) * runnable_avg
+ *
+ * XXX collapse load_sum and runnable_load_sum
+ *
+ * cfs_rq:
+ *
+ *   load_sum = \Sum se_weight(se) * se->avg.load_sum
+ *   load_avg = \Sum se->avg.load_avg
+ *
+ *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
+ *   runnable_load_avg = \Sum se->avg.runnable_load_avg
+ */
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+{
+       if (entity_is_task(se))
+               se->runnable_weight = se->load.weight;
+
+       if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+               ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+               return 1;
+       }
+
+       return 0;
+}
+
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       if (entity_is_task(se))
+               se->runnable_weight = se->load.weight;
+
+       if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
+                               cfs_rq->curr == se)) {
+
+               ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+               cfs_se_util_change(&se->avg);
+               return 1;
+       }
+
+       return 0;
+}
+
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+{
+       if (___update_load_sum(now, cpu, &cfs_rq->avg,
+                               scale_load_down(cfs_rq->load.weight),
+                               scale_load_down(cfs_rq->runnable_weight),
+                               cfs_rq->curr != NULL)) {
+
+               ___update_load_avg(&cfs_rq->avg, 1, 1);
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * rt_rq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ *   load_avg and runnable_load_avg are not supported and meaningless.
+ *
+ */
+
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
+                               running,
+                               running,
+                               running)) {
+
+               ___update_load_avg(&rq->avg_rt, 1, 1);
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * dl_rq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+                               running,
+                               running,
+                               running)) {
+
+               ___update_load_avg(&rq->avg_dl, 1, 1);
+               return 1;
+       }
+
+       return 0;
+}
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+/*
+ * irq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ */
+
+int update_irq_load_avg(struct rq *rq, u64 running)
+{
+       int ret = 0;
+       /*
+	 * We know how much time has been used by interrupts since the last
+	 * update, but not when. Be pessimistic and assume the interrupts
+	 * happened just before this update. This is not far from reality,
+	 * because an interrupt will most probably wake up a task and trigger
+	 * an update of the rq clock, during which the metric is updated.
+        * We start to decay with normal context time and then we add the
+        * interrupt context time.
+        * We can safely remove running from rq->clock because
+        * rq->clock += delta with delta >= running
+        */
+       ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
+                               0,
+                               0,
+                               0);
+       ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
+                               1,
+                               1,
+                               1);
+
+       if (ret)
+               ___update_load_avg(&rq->avg_irq, 1, 1);
+
+       return ret;
+}
+#endif
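
The two-call pattern in update_irq_load_avg() can be mimicked with a toy exponential average (this is deliberately not PELT's fixed-point geometric decay, just an illustration of the call order): decay over the window where no interrupt is charged, then accrue the interrupt time as if it all ran at the end.

#include <math.h>
#include <stdio.h>

/* Toy exponential average standing in for ___update_load_sum(): real PELT
 * uses fixed-point geometric decay, but the two-phase call pattern is the
 * same: decay over the non-irq window, then accrue the irq window at 100%. */
struct toy_avg { double util; double last; };

static void toy_update(struct toy_avg *a, double now, int running)
{
	double w = exp(-(now - a->last) / 350.0); /* decay weight */

	a->util = a->util * w + (running ? 1.0 : 0.0) * (1.0 - w);
	a->last = now;
}

int main(void)
{
	struct toy_avg irq = { .util = 0.5, .last = 0 };
	double clock = 1000, running = 200;

	toy_update(&irq, clock - running, 0); /* phase 1: decay only */
	toy_update(&irq, clock, 1);           /* phase 2: accrue irq time */
	printf("irq util ~ %.3f\n", irq.util);
	return 0;
}
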
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
new file mode 100644 (file)
index 0000000..d2894db
--- /dev/null
@@ -0,0 +1,72 @@
+#ifdef CONFIG_SMP
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+int update_irq_load_avg(struct rq *rq, u64 running);
+#else
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+       return 0;
+}
+#endif
+
+/*
+ * When a task is dequeued, its estimated utilization should not be updated if
+ * its util_avg has not been updated at least once.
+ * This flag is used to synchronize util_avg updates with util_est updates.
+ * We map this information into the LSB bit of the utilization saved at
+ * dequeue time (i.e. util_est.dequeued).
+ */
+#define UTIL_AVG_UNCHANGED 0x1
+
+static inline void cfs_se_util_change(struct sched_avg *avg)
+{
+       unsigned int enqueued;
+
+       if (!sched_feat(UTIL_EST))
+               return;
+
+       /* Avoid store if the flag has been already set */
+       enqueued = avg->util_est.enqueued;
+       if (!(enqueued & UTIL_AVG_UNCHANGED))
+               return;
+
+       /* Reset flag to report util_avg has been updated */
+       enqueued &= ~UTIL_AVG_UNCHANGED;
+       WRITE_ONCE(avg->util_est.enqueued, enqueued);
+}
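
The LSB trick costs one bit of precision on the stored estimate in exchange for a cheap "has util_avg moved since dequeue" test. A tiny demonstration with a made-up utilization value:

#include <stdio.h>

#define UTIL_AVG_UNCHANGED 0x1

/* Dequeue stores the estimate with the LSB set; a later util_avg update
 * clears it, so util_est can tell whether util_avg moved in between. */
int main(void)
{
	unsigned int util = 742;                    /* hypothetical estimate */
	unsigned int dequeued = util | UTIL_AVG_UNCHANGED;

	printf("flag set:   %u\n", dequeued & UTIL_AVG_UNCHANGED); /* 1 */

	dequeued &= ~UTIL_AVG_UNCHANGED;            /* cfs_se_util_change() */
	printf("flag clear: %u (value %u)\n",
	       dequeued & UTIL_AVG_UNCHANGED, dequeued);
	return 0;
}
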
+
+#else
+
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+{
+       return 0;
+}
+
+static inline int
+update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       return 0;
+}
+
+static inline int
+update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       return 0;
+}
+
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+       return 0;
+}
+#endif
+
+
index 47556b0c9a95faff3e827f6ffd690646cff38224..2e2955a8cf8fe3648a007036dde85320f5834a45 100644 (file)
@@ -5,6 +5,8 @@
  */
 #include "sched.h"
 
+#include "pelt.h"
+
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
@@ -508,8 +510,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (!rt_se)
+       if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
+               /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+               cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+       }
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
 }
@@ -833,6 +838,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 * can be time-consuming. Try to avoid it when possible.
                 */
                raw_spin_lock(&rt_rq->rt_runtime_lock);
+               if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+                       rt_rq->rt_runtime = rt_b->rt_runtime;
                skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                if (skip)
@@ -968,8 +975,6 @@ static void update_curr_rt(struct rq *rq)
        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);
 
-       sched_rt_avg_update(rq, delta_exec);
-
        if (!rt_bandwidth_enabled())
                return;
 
@@ -1001,8 +1006,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
 
-       /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-       cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1017,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_queued)
                return;
-       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+       if (rt_rq_throttled(rt_rq))
                return;
 
-       add_nr_running(rq, rt_rq->rt_nr_running);
-       rt_rq->rt_queued = 1;
+       if (rt_rq->rt_nr_running) {
+               add_nr_running(rq, rt_rq->rt_nr_running);
+               rt_rq->rt_queued = 1;
+       }
 
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
@@ -1572,6 +1578,14 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        rt_queue_push_tasks(rq);
 
+       /*
+	 * If the previous task was rt, put_prev_task() has already updated
+	 * the utilization. We only care about the case where we start to
+	 * schedule an rt task.
+        */
+       if (rq->curr->sched_class != &rt_sched_class)
+               update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+
        return p;
 }
 
@@ -1579,6 +1593,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
 
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+
        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
@@ -2308,6 +2324,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        struct sched_rt_entity *rt_se = &p->rt;
 
        update_curr_rt(rq);
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
 
        watchdog(rq, p);
 
index 6601baf2361c04605ce198091bdfe8d56aa8a7b7..4a2e8cae63c41111672a898d3955008cd345c69b 100644 (file)
@@ -334,9 +334,10 @@ struct cfs_bandwidth {
        u64                     runtime;
        s64                     hierarchical_quota;
        u64                     runtime_expires;
+       int                     expires_seq;
 
-       int                     idle;
-       int                     period_active;
+       short                   idle;
+       short                   period_active;
        struct hrtimer          period_timer;
        struct hrtimer          slack_timer;
        struct list_head        throttled_cfs_rq;
@@ -551,6 +552,7 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
+       int                     expires_seq;
        u64                     runtime_expires;
        s64                     runtime_remaining;
 
@@ -592,6 +594,7 @@ struct rt_rq {
        unsigned long           rt_nr_total;
        int                     overloaded;
        struct plist_head       pushable_tasks;
+
 #endif /* CONFIG_SMP */
        int                     rt_queued;
 
@@ -609,6 +612,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
@@ -666,7 +674,26 @@ struct dl_rq {
        u64                     bw_ratio;
 };
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)     (!se->my_q)
+#else
+#define entity_is_task(se)     1
+#endif
+
 #ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of these helpers and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+       return scale_load_down(se->load.weight);
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+       return scale_load_down(se->runnable_weight);
+}
 
 static inline bool sched_asym_prefer(int a, int b)
 {
@@ -826,8 +853,12 @@ struct rq {
 
        struct list_head cfs_tasks;
 
-       u64                     rt_avg;
-       u64                     age_stamp;
+       struct sched_avg        avg_rt;
+       struct sched_avg        avg_dl;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
+       struct sched_avg        avg_irq;
+#endif
        u64                     idle_stamp;
        u64                     avg_idle;
 
@@ -1068,7 +1099,8 @@ enum numa_faults_stats {
 };
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *, struct task_struct *);
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+                       int cpu, int scpu);
 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
 #else
 static inline void
@@ -1683,15 +1715,9 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
-extern const_debug unsigned int sysctl_sched_time_avg;
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
-static inline u64 sched_avg_period(void)
-{
-       return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
@@ -1728,8 +1754,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
 #ifndef arch_scale_cpu_capacity
 static __always_inline
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1740,12 +1764,6 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
        return SCHED_CAPACITY_SCALE;
 }
 #endif
-
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-       rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
-       sched_avg_update(rq);
-}
 #else
 #ifndef arch_scale_cpu_capacity
 static __always_inline
@@ -1754,8 +1772,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
        return SCHED_CAPACITY_SCALE;
 }
 #endif
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -2170,11 +2186,16 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-static inline unsigned long cpu_util_dl(struct rq *rq)
+static inline unsigned long cpu_bw_dl(struct rq *rq)
 {
        return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
 }
 
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+       return READ_ONCE(rq->avg_dl.util_avg);
+}
+
 static inline unsigned long cpu_util_cfs(struct rq *rq)
 {
        unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
@@ -2186,4 +2207,37 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
 
        return util;
 }
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+       return READ_ONCE(rq->avg_rt.util_avg);
+}
+#endif
+
+#ifdef HAVE_SCHED_AVG_IRQ
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+       return rq->avg_irq.util_avg;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+       util *= (max - irq);
+       util /= max;
+
+       return util;
+}
+#else
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+       return 0;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+       return util;
+}
 #endif
index b6fb2c3b3ff73d6abe01beb5dc5b1bf3b181728d..66b59ac77c2209fd9fccd92c8ae0c660e7428e4d 100644 (file)
@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
 }
 EXPORT_SYMBOL(swake_up_locked);
 
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
 {
        unsigned long flags;
 
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
        swake_up_locked(q);
        raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
 
 /*
  * Does not allow usage from IRQ disabled, since we must be able to
@@ -69,14 +69,14 @@ void swake_up_all(struct swait_queue_head *q)
 }
 EXPORT_SYMBOL(swake_up_all);
 
-void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
 {
        wait->task = current;
        if (list_empty(&wait->task_list))
-               list_add(&wait->task_list, &q->task_list);
+               list_add_tail(&wait->task_list, &q->task_list);
 }
 
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
        unsigned long flags;
 
@@ -85,16 +85,28 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
        set_current_state(state);
        raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
 
 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
-       if (signal_pending_state(state, current))
-               return -ERESTARTSYS;
+       unsigned long flags;
+       long ret = 0;
 
-       prepare_to_swait(q, wait, state);
+       raw_spin_lock_irqsave(&q->lock, flags);
+       if (unlikely(signal_pending_state(state, current))) {
+               /*
+                * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
+                * must not see us.
+                */
+               list_del_init(&wait->task_list);
+               ret = -ERESTARTSYS;
+       } else {
+               __prepare_to_swait(q, wait);
+               set_current_state(state);
+       }
+       raw_spin_unlock_irqrestore(&q->lock, flags);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(prepare_to_swait_event);
 
index 05a831427bc741e8d6290a6189bedbfda2457ded..56a0fed30c0a8786a47c600e90c4016140b1fb58 100644 (file)
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
        }
-       if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+       if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
                printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
        }
 
index 928be527477eb8b1e7e04bbf7d3cea0dbde7316f..870f97b313e3891003b30982603cf333a879d4a8 100644 (file)
@@ -134,8 +134,8 @@ static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
  *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
  */
 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
  *
  * On UP it can prevent extra preemption.
  *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
  */
 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
@@ -392,35 +392,36 @@ static inline bool is_kthread_should_stop(void)
  *     if (condition)
  *         break;
  *
- *     p->state = mode;                                condition = true;
- *     smp_mb(); // A                          smp_wmb(); // C
- *     if (!wq_entry->flags & WQ_FLAG_WOKEN)   wq_entry->flags |= WQ_FLAG_WOKEN;
- *         schedule()                          try_to_wake_up();
- *     p->state = TASK_RUNNING;                    ~~~~~~~~~~~~~~~~~~
- *     wq_entry->flags &= ~WQ_FLAG_WOKEN;              condition = true;
- *     smp_mb() // B                           smp_wmb(); // C
- *                                             wq_entry->flags |= WQ_FLAG_WOKEN;
- * }
- * remove_wait_queue(&wq_head, &wait);
+ *     // in wait_woken()                      // in woken_wake_function()
  *
+ *     p->state = mode;                                wq_entry->flags |= WQ_FLAG_WOKEN;
+ *     smp_mb(); // A                          try_to_wake_up():
+ *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))    <full barrier>
+ *         schedule()                             if (p->state & mode)
+ *     p->state = TASK_RUNNING;                              p->state = TASK_RUNNING;
+ *     wq_entry->flags &= ~WQ_FLAG_WOKEN;      ~~~~~~~~~~~~~~~~~~
+ *     smp_mb(); // B                          condition = true;
+ * }                                           smp_mb(); // C
+ * remove_wait_queue(&wq_head, &wait);         wq_entry->flags |= WQ_FLAG_WOKEN;
  */
 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
 {
-       set_current_state(mode); /* A */
        /*
-        * The above implies an smp_mb(), which matches with the smp_wmb() from
-        * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
-        * also observe all state before the wakeup.
+        * The below executes an smp_mb(), which matches with the full barrier
+        * executed by the try_to_wake_up() in woken_wake_function() such that
+        * either we see the store to wq_entry->flags in woken_wake_function()
+        * or woken_wake_function() sees our store to current->state.
         */
+       set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);
 
        /*
-        * The below implies an smp_mb(), it too pairs with the smp_wmb() from
-        * woken_wake_function() such that we must either observe the wait
-        * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
-        * an event.
+        * The below executes an smp_mb(), which matches with the smp_mb() (C)
+        * in woken_wake_function() such that either we see the wait condition
+        * being true or the store to wq_entry->flags in woken_wake_function()
+        * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
 
@@ -430,14 +431,8 @@ EXPORT_SYMBOL(wait_woken);
 
 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
 {
-       /*
-        * Although this function is called under waitqueue lock, LOCK
-        * doesn't imply write barrier and the users expects write
-        * barrier semantics on wakeup functions.  The following
-        * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-        * and is paired with smp_store_mb() in wait_woken().
-        */
-       smp_wmb(); /* C */
+       /* Pairs with the smp_store_mb() in wait_woken(). */
+       smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;
 
        return default_wake_function(wq_entry, mode, sync, key);
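
For illustration: the barrier rework does not change how wait_woken() is used. A hedged kernel-context sketch of the canonical pattern; wait_for_data() and the data_ready callback are hypothetical:

static long wait_for_data(wait_queue_head_t *wq, bool (*data_ready)(void),
                          long timeout)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(wq, &wait);
        while (!data_ready() && timeout)
                /* Sleeps until woken_wake_function() sets WQ_FLAG_WOKEN
                 * or the timeout elapses. */
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
        remove_wait_queue(wq, &wait);

        return timeout;
}
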
index 5043e7433f4b15879a6498ed3d1ca6cfa2876f83..c230c2dd48e19753078d9bd9b7b9b463ff81c14f 100644 (file)
@@ -238,8 +238,7 @@ int smpboot_unpark_threads(unsigned int cpu)
 
        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
-               if (cpumask_test_cpu(cpu, cur->cpumask))
-                       smpboot_unpark_thread(cur, cpu);
+               smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
        return 0;
 }
@@ -280,34 +279,26 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
 }
 
 /**
- * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
+ * smpboot_register_percpu_thread - Register a per_cpu thread related
  *                                         to hotplug
  * @plug_thread:       Hotplug thread descriptor
- * @cpumask:           The cpumask where threads run
  *
  * Creates and starts the threads on all online cpus.
  */
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
-                                          const struct cpumask *cpumask)
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 {
        unsigned int cpu;
        int ret = 0;
 
-       if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
-               return -ENOMEM;
-       cpumask_copy(plug_thread->cpumask, cpumask);
-
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
-                       free_cpumask_var(plug_thread->cpumask);
                        goto out;
                }
-               if (cpumask_test_cpu(cpu, cpumask))
-                       smpboot_unpark_thread(plug_thread, cpu);
+               smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
 out:
@@ -315,7 +306,7 @@ out:
        put_online_cpus();
        return ret;
 }
-EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
 
 /**
  * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
@@ -331,44 +322,9 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
-       free_cpumask_var(plug_thread->cpumask);
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
 
-/**
- * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
- * @plug_thread:       Hotplug thread descriptor
- * @new:               Revised mask to use
- *
- * The cpumask field in the smp_hotplug_thread must not be updated directly
- * by the client, but only by calling this function.
- * This function can only be called on a registered smp_hotplug_thread.
- */
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                         const struct cpumask *new)
-{
-       struct cpumask *old = plug_thread->cpumask;
-       static struct cpumask tmp;
-       unsigned int cpu;
-
-       lockdep_assert_cpus_held();
-       mutex_lock(&smpboot_threads_lock);
-
-       /* Park threads that were exclusively enabled on the old mask. */
-       cpumask_andnot(&tmp, old, new);
-       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
-               smpboot_park_thread(plug_thread, cpu);
-
-       /* Unpark threads that are exclusively enabled on the new mask. */
-       cpumask_andnot(&tmp, new, old);
-       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
-               smpboot_unpark_thread(plug_thread, cpu);
-
-       cpumask_copy(old, new);
-
-       mutex_unlock(&smpboot_threads_lock);
-}
-
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
 
 /*
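
For illustration: with the cpumask parameter gone, smpboot_register_percpu_thread() creates the thread on every online CPU and unparks it everywhere. A hedged kernel-context sketch of a caller; all my_* names are hypothetical:

static DEFINE_PER_CPU(struct task_struct *, my_task);

static int my_should_run(unsigned int cpu)
{
        return 0;       /* hypothetical: report pending per-cpu work here */
}

static void my_thread_fn(unsigned int cpu)
{
        /* hypothetical per-cpu work */
}

static struct smp_hotplug_thread my_threads = {
        .store             = &my_task,
        .thread_should_run = my_should_run,
        .thread_fn         = my_thread_fn,
        .thread_comm       = "my/%u",
};

static int __init my_init(void)
{
        /* No cpumask argument any more; threads run on all online CPUs. */
        return smpboot_register_percpu_thread(&my_threads);
}
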
index de2f57fddc04ed85f5d419fe64e51cdcbb93193b..6f584861d329bfb0382a5691320938cf3db7009b 100644 (file)
@@ -79,12 +79,16 @@ static void wakeup_softirqd(void)
 
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+ * unless we're doing some of the synchronous softirqs.
  */
-static bool ksoftirqd_running(void)
+#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+static bool ksoftirqd_running(unsigned long pending)
 {
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
+       if (pending & SOFTIRQ_NOW_MASK)
+               return false;
        return tsk && (tsk->state == TASK_RUNNING);
 }
 
@@ -139,9 +143,13 @@ static void __local_bh_enable(unsigned int cnt)
 {
        lockdep_assert_irqs_disabled();
 
+       if (preempt_count() == cnt)
+               trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
-       preempt_count_sub(cnt);
+
+       __preempt_count_sub(cnt);
 }
 
 /*
@@ -324,7 +332,7 @@ asmlinkage __visible void do_softirq(void)
 
        pending = local_softirq_pending();
 
-       if (pending && !ksoftirqd_running())
+       if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();
 
        local_irq_restore(flags);
@@ -351,7 +359,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (ksoftirqd_running())
+       if (ksoftirqd_running(local_softirq_pending()))
                return;
 
        if (!force_irqthreads) {
@@ -382,7 +390,7 @@ static inline void tick_irq_exit(void)
 
        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-               if (!in_interrupt())
+               if (!in_irq())
                        tick_nohz_irq_exit();
        }
 #endif
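
For illustration: the deferral decision is a plain mask test against the raised softirq bits. A standalone sketch, not part of the patch, using the kernel's enum values for the two softirqs in the mask:

#include <stdio.h>

#define HI_SOFTIRQ       0
#define TASKLET_SOFTIRQ  6   /* values as in the kernel's softirq enum */
#define TIMER_SOFTIRQ    1

#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))

int main(void)
{
        unsigned long pending = 1 << TASKLET_SOFTIRQ;

        /* Tasklets are in the "now" mask, so they bypass the ksoftirqd check. */
        printf("%s\n", (pending & SOFTIRQ_NOW_MASK) ? "run now" : "defer");

        pending = 1 << TIMER_SOFTIRQ;   /* timer softirqs still get deferred */
        printf("%s\n", (pending & SOFTIRQ_NOW_MASK) ? "run now" : "defer");
        return 0;
}
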
index f89014a2c2381e4b38c0839970d28864f751a8d1..067cb83f37eae5644fa84e29a614abd6b4d8a3de 100644 (file)
@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        unsigned long flags;
        bool enabled;
 
+       preempt_disable();
        raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
        wake_up_q(&wakeq);
+       preempt_enable();
 
        return enabled;
 }
@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        DEFINE_WAKE_Q(wakeq);
        int err;
+
 retry:
+       /*
+        * The waking up of stopper threads has to happen in the same
+        * scheduling context as the queueing.  Otherwise, there is a
+        * possibility of one of the above stoppers being woken up by another
+        * CPU, and preempting us. We would then never wake up the other
+        * stopper.
+        */
+       preempt_disable();
        raw_spin_lock_irq(&stopper1->lock);
        raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
-       err = -ENOENT;
-       if (!stopper1->enabled || !stopper2->enabled)
+       if (!stopper1->enabled || !stopper2->enabled) {
+               err = -ENOENT;
                goto unlock;
+       }
+
        /*
         * Ensure that if we race with __stop_cpus() the stoppers won't get
         * queued up in reverse order leading to system deadlock.
@@ -253,24 +266,30 @@ retry:
         * It can be falsely true but it is safe to spin until it is cleared,
         * queue_stop_cpus_work() does everything under preempt_disable().
         */
-       err = -EDEADLK;
-       if (unlikely(stop_cpus_in_progress))
-                       goto unlock;
+       if (unlikely(stop_cpus_in_progress)) {
+               err = -EDEADLK;
+               goto unlock;
+       }
 
        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);
+
 unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);
 
        if (unlikely(err == -EDEADLK)) {
+               preempt_enable();
+
                while (stop_cpus_in_progress)
                        cpu_relax();
+
                goto retry;
        }
 
        wake_up_q(&wakeq);
+       preempt_enable();
 
        return err;
 }
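
For illustration: the fix above boils down to keeping the queueing and the wakeup inside one non-preemptible region. A distilled sketch of that pattern, using the same names as the code above:

        DEFINE_WAKE_Q(wakeq);
        unsigned long flags;

        preempt_disable();      /* no preemption until the wakeup is done */
        raw_spin_lock_irqsave(&stopper->lock, flags);
        if (stopper->enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);

        wake_up_q(&wakeq);      /* a woken stopper cannot preempt us here */
        preempt_enable();
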
index 38509dc1f77b0916cc7633f6814d86549ea62a40..e27b51d3facdb5c617b4eecbaa2ff030dbf22acd 100644 (file)
@@ -2512,11 +2512,11 @@ static int do_sysinfo(struct sysinfo *info)
 {
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
-       struct timespec tp;
+       struct timespec64 tp;
 
        memset(info, 0, sizeof(struct sysinfo));
 
-       get_monotonic_boottime(&tp);
+       ktime_get_boottime_ts64(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
index 2d9837c0aff4aef97ad8bb1542bcbd7cf1493b35..f22f76b7a138ceedcc5392a24f493ec04366bc1b 100644 (file)
@@ -368,14 +368,6 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "sched_time_avg_ms",
-               .data           = &sysctl_sched_time_avg,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-       },
 #ifdef CONFIG_SCHEDSTATS
        {
                .procname       = "sched_schedstats",
index dd53e354f630d9251de443d6a9cd6632c3646211..7bca480151b02417d114747494e86e4ee2b1eac8 100644 (file)
@@ -162,90 +162,6 @@ static int test_kprobes(void)
 
 }
 
-#if 0
-static u32 jph_val;
-
-static u32 j_kprobe_target(u32 value)
-{
-       if (preemptible()) {
-               handler_errors++;
-               pr_err("jprobe-handler is preemptible\n");
-       }
-       if (value != rand1) {
-               handler_errors++;
-               pr_err("incorrect value in jprobe handler\n");
-       }
-
-       jph_val = rand1;
-       jprobe_return();
-       return 0;
-}
-
-static struct jprobe jp = {
-       .entry          = j_kprobe_target,
-       .kp.symbol_name = "kprobe_target"
-};
-
-static int test_jprobe(void)
-{
-       int ret;
-
-       ret = register_jprobe(&jp);
-       if (ret < 0) {
-               pr_err("register_jprobe returned %d\n", ret);
-               return ret;
-       }
-
-       ret = target(rand1);
-       unregister_jprobe(&jp);
-       if (jph_val == 0) {
-               pr_err("jprobe handler not called\n");
-               handler_errors++;
-       }
-
-       return 0;
-}
-
-static struct jprobe jp2 = {
-       .entry          = j_kprobe_target,
-       .kp.symbol_name = "kprobe_target2"
-};
-
-static int test_jprobes(void)
-{
-       int ret;
-       struct jprobe *jps[2] = {&jp, &jp2};
-
-       /* addr and flags should be cleard for reusing kprobe. */
-       jp.kp.addr = NULL;
-       jp.kp.flags = 0;
-       ret = register_jprobes(jps, 2);
-       if (ret < 0) {
-               pr_err("register_jprobes returned %d\n", ret);
-               return ret;
-       }
-
-       jph_val = 0;
-       ret = target(rand1);
-       if (jph_val == 0) {
-               pr_err("jprobe handler not called\n");
-               handler_errors++;
-       }
-
-       jph_val = 0;
-       ret = target2(rand1);
-       if (jph_val == 0) {
-               pr_err("jprobe handler2 not called\n");
-               handler_errors++;
-       }
-       unregister_jprobes(jps, 2);
-
-       return 0;
-}
-#else
-#define test_jprobe() (0)
-#define test_jprobes() (0)
-#endif
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 
@@ -383,16 +299,6 @@ int init_test_probes(void)
        if (ret < 0)
                errors++;
 
-       num_tests++;
-       ret = test_jprobe();
-       if (ret < 0)
-               errors++;
-
-       num_tests++;
-       ret = test_jprobes();
-       if (ret < 0)
-               errors++;
-
 #ifdef CONFIG_KRETPROBES
        num_tests++;
        ret = test_kretprobe();
index 639321bf2e397934b1b9ec53c241c81bf9c074d1..fa5de5e8de61d88d266cd2651dedd2090ad89284 100644 (file)
@@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr)
  * @timr:      Pointer to the posixtimer data struct
  * @now:       Current time to forward the timer against
  */
-static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
 {
        struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-       return (int) alarm_forward(alarm, timr->it_interval, now);
+       return alarm_forward(alarm, timr->it_interval, now);
 }
 
 /**
@@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
                ktime_t now = alarm_bases[type].gettime();
-               exp = ktime_add(now, exp);
+
+               exp = ktime_add_safe(now, exp);
        }
 
        ret = alarmtimer_do_nsleep(&alarm, exp, type);
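
For illustration: ktime_add_safe() saturates instead of wrapping, so an enormous relative timeout from userspace can no longer overflow into an already-expired absolute time. A minimal userspace sketch of the saturation logic; this mirrors the spirit of the helper, not the kernel's exact implementation:

#include <stdio.h>
#include <stdint.h>

#define KTIME_MAX INT64_MAX

/* Saturating add: clamp to KTIME_MAX on signed overflow. */
static int64_t add_safe(int64_t lhs, int64_t rhs)
{
        int64_t res = (int64_t)((uint64_t)lhs + (uint64_t)rhs);

        return (res < 0 || res < lhs) ? KTIME_MAX : res;
}

int main(void)
{
        /* now + absurdly large timeout clamps to KTIME_MAX, never wraps. */
        printf("%lld\n", (long long)add_safe(KTIME_MAX - 5, 100));
        return 0;
}
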
index 16c027e9cc730a38d1bb97da89dd9f30c4557145..8c0e4092f661824edf50a85342284d9ddcb03e6b 100644 (file)
@@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev)
                dev->cpumask = cpumask_of(smp_processor_id());
        }
 
+       if (dev->cpumask == cpu_all_mask) {
+               WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
+                    dev->name);
+               dev->cpumask = cpu_possible_mask;
+       }
+
        raw_spin_lock_irqsave(&clockevents_lock, flags);
 
        list_add(&dev->list, &clockevent_devices);
index f89a78e2792b645ea8b8abc540988569c560799b..f74fb00d806444739f9d8ee1611a98c694325f95 100644 (file)
@@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
 /*[Clocksource internal variables]---------
  * curr_clocksource:
  *     currently selected clocksource.
+ * suspend_clocksource:
+ *     used to calculate the suspend time.
  * clocksource_list:
  *     linked list with the registered clocksources
  * clocksource_mutex:
@@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
  *     Name of the user-specified clocksource.
  */
 static struct clocksource *curr_clocksource;
+static struct clocksource *suspend_clocksource;
 static LIST_HEAD(clocksource_list);
 static DEFINE_MUTEX(clocksource_mutex);
 static char override_name[CS_NAME_LEN];
 static int finished_booting;
+static u64 suspend_start;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static void clocksource_watchdog_work(struct work_struct *work);
@@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
 
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
+static bool clocksource_is_suspend(struct clocksource *cs)
+{
+       return cs == suspend_clocksource;
+}
+
+static void __clocksource_suspend_select(struct clocksource *cs)
+{
+       /*
+        * Skip clocksources that will be stopped while the system is suspended.
+        */
+       if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
+               return;
+
+       /*
+        * The nonstop clocksource can be selected as the suspend clocksource to
+        * calculate the suspend time, so it should not supply suspend/resume
+        * interfaces of its own to stop it when the system suspends.
+        */
+       if (cs->suspend || cs->resume) {
+               pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
+                       cs->name);
+       }
+
+       /* Pick the best rating. */
+       if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
+               suspend_clocksource = cs;
+}
+
+/**
+ * clocksource_suspend_select - Select the best clocksource for suspend timing
+ * @fallback:  whether to select a fallback clocksource
+ */
+static void clocksource_suspend_select(bool fallback)
+{
+       struct clocksource *cs, *old_suspend;
+
+       old_suspend = suspend_clocksource;
+       if (fallback)
+               suspend_clocksource = NULL;
+
+       list_for_each_entry(cs, &clocksource_list, list) {
+               /* Skip current if we were requested for a fallback. */
+               if (fallback && cs == old_suspend)
+                       continue;
+
+               __clocksource_suspend_select(cs);
+       }
+}
+
+/**
+ * clocksource_start_suspend_timing - Start measuring the suspend timing
+ * @cs:                        current clocksource from timekeeping
+ * @start_cycles:      current cycles from timekeeping
+ *
+ * This function saves the start cycle value of the suspend timer, which is
+ * used to calculate the suspend time when the system resumes.
+ *
+ * This function is called late in the suspend process from timekeeping_suspend(),
+ * which means processes are frozen, non-boot CPUs and interrupts are disabled.
+ * It is therefore possible to start the suspend timer without taking the
+ * clocksource mutex.
+ */
+void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+{
+       if (!suspend_clocksource)
+               return;
+
+       /*
+        * If the current clocksource is the suspend timer, use the
+        * tkr_mono.cycle_last value as suspend_start to avoid a duplicate
+        * read of the suspend timer.
+        */
+       if (clocksource_is_suspend(cs)) {
+               suspend_start = start_cycles;
+               return;
+       }
+
+       if (suspend_clocksource->enable &&
+           suspend_clocksource->enable(suspend_clocksource)) {
+               pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
+               return;
+       }
+
+       suspend_start = suspend_clocksource->read(suspend_clocksource);
+}
+
+/**
+ * clocksource_stop_suspend_timing - Stop measuring the suspend timing
+ * @cs:                current clocksource from timekeeping
+ * @cycle_now: current cycles from timekeeping
+ *
+ * This function calculates the suspend time from the suspend timer.
+ *
+ * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
+ *
+ * This function is called early in the resume process from timekeeping_resume(),
+ * which means there is only one CPU, no processes are running and interrupts
+ * are disabled. It is therefore possible to stop the suspend timer without
+ * taking the clocksource mutex.
+ */
+u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+{
+       u64 now, delta, nsec = 0;
+
+       if (!suspend_clocksource)
+               return 0;
+
+       /*
+        * If the current clocksource is the suspend timer, use the
+        * tkr_mono.cycle_last value from timekeeping as the current cycle to
+        * avoid a duplicate read of the suspend timer.
+        */
+       if (clocksource_is_suspend(cs))
+               now = cycle_now;
+       else
+               now = suspend_clocksource->read(suspend_clocksource);
+
+       if (now > suspend_start) {
+               delta = clocksource_delta(now, suspend_start,
+                                         suspend_clocksource->mask);
+               nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+                                      suspend_clocksource->shift);
+       }
+
+       /*
+        * Disable the suspend timer to save power if current clocksource is
+        * not the suspend timer.
+        */
+       if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
+               suspend_clocksource->disable(suspend_clocksource);
+
+       return nsec;
+}
+
 /**
  * clocksource_suspend - suspend the clocksource(s)
  */
@@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 
        clocksource_select();
        clocksource_select_watchdog(false);
+       __clocksource_suspend_select(cs);
        mutex_unlock(&clocksource_mutex);
        return 0;
 }
@@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 
        clocksource_select();
        clocksource_select_watchdog(false);
+       clocksource_suspend_select(false);
        mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs)
                        return -EBUSY;
        }
 
+       if (clocksource_is_suspend(cs)) {
+               /*
+                * Select and try to install a replacement suspend clocksource.
+                * If no replacement is found, just let this clocksource go
+                * and run without a suspend clocksource.
+                */
+               clocksource_suspend_select(true);
+       }
+
        clocksource_watchdog_lock(&flags);
        clocksource_dequeue_watchdog(cs);
        list_del_init(&cs->list);
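
For illustration: the sleep length computed in clocksource_stop_suspend_timing() is just the cycle delta scaled by the suspend clocksource's mult/shift pair. A standalone sketch, not part of the patch, with a hypothetical 1 MHz counter:

#include <stdio.h>
#include <stdint.h>

/* Equivalent of mul_u64_u32_shr() for deltas that fit: (delta * mult) >> shift. */
static uint64_t cyc_to_ns(uint64_t delta, uint32_t mult, uint32_t shift)
{
        return (delta * mult) >> shift;
}

int main(void)
{
        /* Hypothetical 1 MHz counter: one cycle is 1000 ns, encoded as
         * mult = 1000 << 10 and shift = 10. 5,000,000 cycles = 5 seconds. */
        printf("slept %llu ns\n",
               (unsigned long long)cyc_to_ns(5000000, 1000 << 10, 10));
        return 0;
}
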
index 055a4a728c00cce3945afc04b9bee692b243896b..e1a549c9e39918303d359c036d532f5ec22ed682 100644 (file)
@@ -718,8 +718,8 @@ static void hrtimer_switch_to_hres(void)
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
        if (tick_init_highres()) {
-               printk(KERN_WARNING "Could not switch to high resolution "
-                                   "mode on CPU %d\n", base->cpu);
+               pr_warn("Could not switch to high resolution mode on CPU %u\n",
+                       base->cpu);
                return;
        }
        base->hres_active = 1;
@@ -1573,8 +1573,7 @@ retry:
        else
                expires_next = ktime_add(now, delta);
        tick_program_event(expires_next, 1);
-       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
-                   ktime_to_ns(delta));
+       pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
 }
 
 /* called with interrupts disabled */
@@ -1659,7 +1658,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
 {
        switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
        case TT_COMPAT:
                if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
                        return -EFAULT;
index a09ded765f6c50f6c0d9ba46c30644b539d1472a..c5e0cba3b39cc12c5b6ef2f12c04395123e999c6 100644 (file)
@@ -502,7 +502,7 @@ static void sched_sync_hw_clock(struct timespec64 now,
 {
        struct timespec64 next;
 
-       getnstimeofday64(&next);
+       ktime_get_real_ts64(&next);
        if (!fail)
                next.tv_sec = 659;
        else {
@@ -537,7 +537,7 @@ static void sync_rtc_clock(void)
        if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
                return;
 
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
 
        adjust = now;
        if (persistent_clock_is_local)
@@ -591,7 +591,7 @@ static bool sync_cmos_clock(void)
         * Architectures are strongly encouraged to use rtclib and not
         * implement this legacy API.
         */
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
        if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
                if (persistent_clock_is_local)
                        adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
@@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void)
 /*
  * Propagate a new txc->status value into the NTP state:
  */
-static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
+static inline void process_adj_status(const struct timex *txc)
 {
        if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                time_state = TIME_OK;
@@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 }
 
 
-static inline void process_adjtimex_modes(struct timex *txc,
-                                               struct timespec64 *ts,
-                                               s32 *time_tai)
+static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
 {
        if (txc->modes & ADJ_STATUS)
-               process_adj_status(txc, ts);
+               process_adj_status(txc);
 
        if (txc->modes & ADJ_NANO)
                time_status |= STA_NANO;
@@ -718,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
  * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
-int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
 {
        int result;
 
@@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 
                /* If there are input parameters, then process them: */
                if (txc->modes)
-                       process_adjtimex_modes(txc, ts, time_tai);
+                       process_adjtimex_modes(txc, time_tai);
 
                txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
                                  NTP_SCALE_SHIFT);
@@ -1022,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t
 
 static int __init ntp_tick_adj_setup(char *str)
 {
-       int rc = kstrtol(str, 0, (long *)&ntp_tick_adj);
-
+       int rc = kstrtos64(str, 0, &ntp_tick_adj);
        if (rc)
                return rc;
-       ntp_tick_adj <<= NTP_SCALE_SHIFT;
 
+       ntp_tick_adj <<= NTP_SCALE_SHIFT;
        return 1;
 }
 
index 909bd1f1bfb14dba87f6dc82fc85009108cabcdc..c24b0e13f0111de1d1febbf676a8724fcd6cd042 100644 (file)
@@ -8,6 +8,6 @@ extern void ntp_clear(void);
 extern u64 ntp_tick_length(void);
 extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(time64_t secs);
-extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
+extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai);
+extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
 #endif /* _LINUX_NTP_INTERNAL_H */
index 5a6251ac6f7acd183c35a51d9d55fb680fda64dd..294d7b65af33638bd978d279bf4fa53448efd7a8 100644 (file)
@@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now)
                        continue;
 
                timer->it.cpu.expires += incr;
-               timer->it_overrun += 1 << i;
+               timer->it_overrun += 1LL << i;
                delta -= incr;
        }
 }
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        /*
         * Disarm any old timer after extracting its expiry time.
         */
-       lockdep_assert_irqs_disabled();
 
        ret = 0;
        old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        /*
         * Now re-arm for the new expiry time.
         */
-       lockdep_assert_irqs_disabled();
        arm_timer(timer);
 unlock:
        unlock_task_sighand(p, &flags);
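
For illustration: the `1 << i` to `1LL << i` change matters as soon as the shift count reaches the width of int. A minimal sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
        int i = 32;

        /* 1 << 32 is undefined behaviour for a 32-bit int; the 64-bit
         * shift is well defined and keeps it_overrun from being corrupted. */
        printf("%lld\n", 1LL << i);   /* prints 4294967296 */
        return 0;
}
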
index 26aa9569e24a54a6060584a5931ce36748f36d83..2c6847d5d69bae179291f1a6259f611e469dfb8c 100644 (file)
@@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
                ktime_get_ts64(tp);
                break;
        case CLOCK_BOOTTIME:
-               get_monotonic_boottime64(tp);
+               ktime_get_boottime_ts64(tp);
                break;
        default:
                return -EINVAL;
index e08ce3f27447390846394e55a11cd4e9c029bb37..f23cc46ecf3ed10bbd0bd940c87f3e813dd5bf4c 100644 (file)
@@ -85,15 +85,6 @@ static const struct k_clock clock_realtime, clock_monotonic;
 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
 #endif
 
-/*
- * parisc wants ENOTSUP instead of EOPNOTSUPP
- */
-#ifndef ENOTSUP
-# define ENANOSLEEP_NOTSUP EOPNOTSUPP
-#else
-# define ENANOSLEEP_NOTSUP ENOTSUP
-#endif
-
 /*
  * The timer ID is turned into a timer address by idr_find().
  * Verifying a valid ID consists of:
@@ -228,21 +219,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
  */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
 {
-       getrawmonotonic64(tp);
+       ktime_get_raw_ts64(tp);
        return 0;
 }
 
 
 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
 {
-       *tp = current_kernel_time64();
+       ktime_get_coarse_real_ts64(tp);
        return 0;
 }
 
 static int posix_get_monotonic_coarse(clockid_t which_clock,
                                                struct timespec64 *tp)
 {
-       *tp = get_monotonic_coarse64();
+       ktime_get_coarse_ts64(tp);
        return 0;
 }
 
@@ -254,13 +245,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
 
 static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
 {
-       get_monotonic_boottime64(tp);
+       ktime_get_boottime_ts64(tp);
        return 0;
 }
 
 static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
 {
-       timekeeping_clocktai64(tp);
+       ktime_get_clocktai_ts64(tp);
        return 0;
 }
 
@@ -283,6 +274,17 @@ static __init int init_posix_timers(void)
 }
 __initcall(init_posix_timers);
 
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+       s64 sum = timr->it_overrun_last + (s64)baseval;
+
+       return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
+
 static void common_hrtimer_rearm(struct k_itimer *timr)
 {
        struct hrtimer *timer = &timr->it.real.timer;
@@ -290,9 +292,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
        if (!timr->it_interval)
                return;
 
-       timr->it_overrun += (unsigned int) hrtimer_forward(timer,
-                                               timer->base->get_time(),
-                                               timr->it_interval);
+       timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+                                           timr->it_interval);
        hrtimer_restart(timer);
 }
 
@@ -321,10 +322,10 @@ void posixtimer_rearm(struct siginfo *info)
 
                timr->it_active = 1;
                timr->it_overrun_last = timr->it_overrun;
-               timr->it_overrun = -1;
+               timr->it_overrun = -1LL;
                ++timr->it_requeue_pending;
 
-               info->si_overrun += timr->it_overrun_last;
+               info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
        }
 
        unlock_timer(timr, flags);
@@ -418,9 +419,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
                                        now = ktime_add(now, kj);
                        }
 #endif
-                       timr->it_overrun += (unsigned int)
-                               hrtimer_forward(timer, now,
-                                               timr->it_interval);
+                       timr->it_overrun += hrtimer_forward(timer, now,
+                                                           timr->it_interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
                        timr->it_active = 1;
@@ -524,7 +524,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->kclock = kc;
-       new_timer->it_overrun = -1;
+       new_timer->it_overrun = -1LL;
 
        if (event) {
                rcu_read_lock();
@@ -645,11 +645,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
        return __hrtimer_expires_remaining_adjusted(timer, now);
 }
 
-static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
 {
        struct hrtimer *timer = &timr->it.real.timer;
 
-       return (int)hrtimer_forward(timer, now, timr->it_interval);
+       return hrtimer_forward(timer, now, timr->it_interval);
 }
 
 /*
@@ -743,7 +743,7 @@ static int do_timer_gettime(timer_t timer_id,  struct itimerspec64 *setting)
 
 /* Get the time remaining on a POSIX.1b interval timer. */
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-               struct itimerspec __user *, setting)
+               struct __kernel_itimerspec __user *, setting)
 {
        struct itimerspec64 cur_setting;
 
@@ -755,7 +755,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
        return ret;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+
 COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                       struct compat_itimerspec __user *, setting)
 {
@@ -768,6 +769,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
        }
        return ret;
 }
+
 #endif
 
 /*
@@ -789,7 +791,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
        if (!timr)
                return -EINVAL;
 
-       overrun = timr->it_overrun_last;
+       overrun = timer_overrun_to_int(timr, 0);
        unlock_timer(timr, flags);
 
        return overrun;
@@ -906,8 +908,8 @@ retry:
 
 /* Set a POSIX.1b interval timer */
 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-               const struct itimerspec __user *, new_setting,
-               struct itimerspec __user *, old_setting)
+               const struct __kernel_itimerspec __user *, new_setting,
+               struct __kernel_itimerspec __user *, old_setting)
 {
        struct itimerspec64 new_spec, old_spec;
        struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
@@ -927,7 +929,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
        return error;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                       struct compat_itimerspec __user *, new,
                       struct compat_itimerspec __user *, old)
@@ -1220,7 +1222,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
        if (!kc)
                return -EINVAL;
        if (!kc->nsleep)
-               return -ENANOSLEEP_NOTSUP;
+               return -EOPNOTSUPP;
 
        if (get_timespec64(&t, rqtp))
                return -EFAULT;
@@ -1247,7 +1249,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
        if (!kc)
                return -EINVAL;
        if (!kc->nsleep)
-               return -ENANOSLEEP_NOTSUP;
+               return -EOPNOTSUPP;
 
        if (compat_get_timespec64(&t, rqtp))
                return -EFAULT;
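
For illustration: timer_overrun_to_int() keeps the now 64-bit overrun count from wrapping when it is squeezed into the int-sized si_overrun and timer_getoverrun(2) values. A minimal userspace sketch of the same clamping, not part of the patch:

#include <stdio.h>
#include <limits.h>

/* Mirrors the clamping helper added above. */
static int overrun_to_int(long long overrun_last, int baseval)
{
        long long sum = overrun_last + (long long)baseval;

        return sum > (long long)INT_MAX ? INT_MAX : (int)sum;
}

int main(void)
{
        printf("%d\n", overrun_to_int(5000000000LL, 10)); /* clamped to INT_MAX */
        printf("%d\n", overrun_to_int(42, 0));            /* 42 */
        return 0;
}
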
index 151e28f5bf304ce134897038639c53998adae4d8..ddb21145211a0280f9b20667a33b9503ce62d776 100644 (file)
@@ -19,7 +19,7 @@ struct k_clock {
        void    (*timer_get)(struct k_itimer *timr,
                             struct itimerspec64 *cur_setting);
        void    (*timer_rearm)(struct k_itimer *timr);
-       int     (*timer_forward)(struct k_itimer *timr, ktime_t now);
+       s64     (*timer_forward)(struct k_itimer *timr, ktime_t now);
        ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now);
        int     (*timer_try_to_cancel)(struct k_itimer *timr);
        void    (*timer_arm)(struct k_itimer *timr, ktime_t expires,
index 2d8f05aad442a5fcafc37b98a177f84bde2047a5..cbc72c2c1fcaa091070ebb2ae229b090d51e5337 100644 (file)
@@ -237,7 +237,7 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
        pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-void __init sched_clock_postinit(void)
+void __init generic_sched_clock_init(void)
 {
        /*
         * If no sched_clock() function has been provided at that point,
index 58045eb976c38fc7c3540f8c0d12db9cf256aa52..a59641fb88b6963ac837a3b4cf4657c24e05098b 100644 (file)
@@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = {
        .max_delta_ticks        = ULONG_MAX,
        .mult                   = 1,
        .shift                  = 0,
-       .cpumask                = cpu_all_mask,
+       .cpumask                = cpu_possible_mask,
 };
 
 static enum hrtimer_restart bc_handler(struct hrtimer *t)
index b7005dd21ec16ce5fa92e33b3b46f04bedbbf7f0..14de3727b18e6ca5c21780aa37af22fbc4d29739 100644 (file)
@@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
         */
        return !curdev ||
                newdev->rating > curdev->rating ||
-              (!cpumask_equal(curdev->cpumask, newdev->cpumask) &&
-               !tick_check_percpu(curdev, newdev, smp_processor_id()));
+              !cpumask_equal(curdev->cpumask, newdev->cpumask);
 }
 
 /*
index da9455a6b42ba1f03cbfaf75f427e92f1241d2bb..5b33e2f5c0ed3d2b158e456c2b6748e39f51b629 100644 (file)
@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-       return local_softirq_pending() & TIMER_SOFTIRQ;
+       return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
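
For illustration: TIMER_SOFTIRQ is a bit number (1), not a bit mask, so the old test actually checked HI_SOFTIRQ's bit and missed pending timer softirqs. A minimal sketch, not part of the patch:

#include <stdio.h>

#define BIT(n)        (1UL << (n))
#define TIMER_SOFTIRQ 1   /* a bit number, as in the kernel enum */

int main(void)
{
        unsigned long pending = BIT(TIMER_SOFTIRQ); /* only timer softirq raised */

        printf("old test: %lu\n", pending & TIMER_SOFTIRQ);      /* 0 - missed */
        printf("new test: %lu\n", pending & BIT(TIMER_SOFTIRQ)); /* 2 - seen */
        return 0;
}
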
index 6fa99213fc720e4b77c467ae69a87007c22b37d2..ccdb351277eecf739605be2e98b44174de595883 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/capability.h>
 #include <linux/timekeeper_internal.h>
@@ -63,7 +64,7 @@ EXPORT_SYMBOL(sys_tz);
  */
 SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
-       time_t i = get_seconds();
+       time_t i = (time_t)ktime_get_real_seconds();
 
        if (tloc) {
                if (put_user(i,tloc))
@@ -106,11 +107,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 /* compat_time_t is a 32 bit "long" and needs to get converted. */
 COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
 {
-       struct timeval tv;
        compat_time_t i;
 
-       do_gettimeofday(&tv);
-       i = tv.tv_sec;
+       i = (compat_time_t)ktime_get_real_seconds();
 
        if (tloc) {
                if (put_user(i,tloc))
@@ -314,9 +313,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
-       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+       return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+              HZ_TO_MSEC_SHR32;
 # else
-       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+       return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 # endif
 #endif
 }
@@ -929,7 +929,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
 EXPORT_SYMBOL_GPL(compat_put_timespec64);
 
 int get_itimerspec64(struct itimerspec64 *it,
-                       const struct itimerspec __user *uit)
+                       const struct __kernel_itimerspec __user *uit)
 {
        int ret;
 
@@ -944,7 +944,7 @@ int get_itimerspec64(struct itimerspec64 *it,
 EXPORT_SYMBOL_GPL(get_itimerspec64);
 
 int put_itimerspec64(const struct itimerspec64 *it,
-                       struct itimerspec __user *uit)
+                       struct __kernel_itimerspec __user *uit)
 {
        int ret;
 
@@ -957,3 +957,24 @@ int put_itimerspec64(const struct itimerspec64 *it,
        return ret;
 }
 EXPORT_SYMBOL_GPL(put_itimerspec64);
+
+int get_compat_itimerspec64(struct itimerspec64 *its,
+                       const struct compat_itimerspec __user *uits)
+{
+       if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
+           __compat_get_timespec64(&its->it_value, &uits->it_value))
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+
+int put_compat_itimerspec64(const struct itimerspec64 *its,
+                       struct compat_itimerspec __user *uits)
+{
+       if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
+           __compat_put_timespec64(&its->it_value, &uits->it_value))
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
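
For illustration: with HZ > 1000 the old truncating division could round a nonzero jiffy count down to 0 ms. A minimal sketch, assuming HZ = 1024 so that HZ_TO_MSEC_NUM/DEN reduce to 125/128:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long j = 1;   /* one jiffy at an assumed HZ of 1024 */

        printf("truncated: %lu ms\n", (j * 125) / 128);            /* 0 */
        printf("rounded:   %lu ms\n", DIV_ROUND_UP(j * 125, 128)); /* 1 */
        return 0;
}
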
index 4786df904c22d0a8e9473e600efee97244eb5782..f3b22f456fac7b61bf3f0e9e9fb29dcbcf9c630f 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/nmi.h>
 #include <linux/sched.h>
 #include <linux/sched/loadavg.h>
+#include <linux/sched/clock.h>
 #include <linux/syscore_ops.h>
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
 #define TK_MIRROR              (1 << 1)
 #define TK_CLOCK_WAS_SET       (1 << 2)
 
+enum timekeeping_adv_mode {
+       /* Update timekeeper when a tick has passed */
+       TK_ADV_TICK,
+
+       /* Update timekeeper on a direct frequency change */
+       TK_ADV_FREQ
+};
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
@@ -97,7 +106,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
        }
 }
 
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 {
        struct timespec64 ts;
 
@@ -154,7 +163,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * a read of the fast-timekeeper tkrs (which is protected by its own locking
  * and update logic).
  */
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 {
        struct clocksource *clock = READ_ONCE(tkr->clock);
 
@@ -203,7 +212,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
        }
 }
 
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 now, last, mask, max, delta;
@@ -247,7 +256,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 {
 }
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
        u64 cycle_now, delta;
 
@@ -344,7 +353,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 {
        u64 nsec;
 
@@ -355,7 +364,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
        return nsec + arch_gettimeoffset();
 }
 
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 {
        u64 delta;
 
@@ -363,7 +372,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
        return timekeeping_delta_to_ns(tkr, delta);
 }
 
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
        u64 delta;
 
@@ -386,7 +395,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
  * slightly wrong timestamp (a few nanoseconds). See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+                                  struct tk_fast *tkf)
 {
        struct tk_read_base *base = tkf->base;
 
@@ -541,10 +551,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
  * number of cycles every time until timekeeping is resumed at which time the
  * proper readout base for the fast timekeeper will be restored automatically.
  */
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
 {
        static struct tk_read_base tkr_dummy;
-       struct tk_read_base *tkr = &tk->tkr_mono;
+       const struct tk_read_base *tkr = &tk->tkr_mono;
 
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1279,7 @@ EXPORT_SYMBOL(do_settimeofday64);
  *
  * Adds or subtracts an offset value from the current time.
  */
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
@@ -1496,22 +1506,39 @@ void __weak read_persistent_clock64(struct timespec64 *ts64)
 }
 
 /**
- * read_boot_clock64 -  Return time of the system start.
+ * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
+ *                                        from the boot.
  *
  * Weak dummy function for arches that do not yet support it.
- * Function to read the exact time the system has been started.
- * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
- *
- *  XXX - Do be sure to remove it once all arches implement it.
+ * wall_time   - current time as returned by persistent clock
+ * boot_offset - offset that is defined as wall_time - boot_time
+ * The default function calculates offset based on the current value of
+ * local_clock(). This way, architectures that support sched_clock() but lack
+ * a dedicated boot time clock will provide the best estimate of the
+ * boot time.
  */
-void __weak read_boot_clock64(struct timespec64 *ts)
+void __weak __init
+read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+                                    struct timespec64 *boot_offset)
 {
-       ts->tv_sec = 0;
-       ts->tv_nsec = 0;
+       read_persistent_clock64(wall_time);
+       *boot_offset = ns_to_timespec64(local_clock());
 }
 
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts off false and is only set when a suspend reaches
+ * timekeeping_suspend(). timekeeping_resume() clears it again when the
+ * timekeeper clocksource did not stop across suspend and has been used
+ * to update the sleep time. If the timekeeper clocksource has stopped,
+ * the flag stays true and is used by the RTC resume code to decide
+ * whether sleeptime must be injected; if so, that code clears the flag.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
 
 /* Flag for if there is a persistent clock on this platform */
 static bool persistent_clock_exists;
@@ -1521,28 +1548,29 @@ static bool persistent_clock_exists;
  */
 void __init timekeeping_init(void)
 {
+       struct timespec64 wall_time, boot_offset, wall_to_mono;
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *clock;
        unsigned long flags;
-       struct timespec64 now, boot, tmp;
-
-       read_persistent_clock64(&now);
-       if (!timespec64_valid_strict(&now)) {
-               pr_warn("WARNING: Persistent clock returned invalid value!\n"
-                       "         Check your CMOS/BIOS settings.\n");
-               now.tv_sec = 0;
-               now.tv_nsec = 0;
-       } else if (now.tv_sec || now.tv_nsec)
-               persistent_clock_exists = true;
 
-       read_boot_clock64(&boot);
-       if (!timespec64_valid_strict(&boot)) {
-               pr_warn("WARNING: Boot clock returned invalid value!\n"
-                       "         Check your CMOS/BIOS settings.\n");
-               boot.tv_sec = 0;
-               boot.tv_nsec = 0;
+       read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
+       if (timespec64_valid_strict(&wall_time) &&
+           timespec64_to_ns(&wall_time) > 0) {
+               persistent_clock_exists = true;
+       } else if (timespec64_to_ns(&wall_time) != 0) {
+               pr_warn("Persistent clock returned invalid value");
+               wall_time = (struct timespec64){0};
        }
 
+       if (timespec64_compare(&wall_time, &boot_offset) < 0)
+               boot_offset = (struct timespec64){0};
+
+       /*
+        * We want to set wall_to_mono so that the following holds:
+        * wall time + wall_to_mono = boot time
+        */
+       wall_to_mono = timespec64_sub(boot_offset, wall_time);
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        ntp_init();
@@ -1552,13 +1580,10 @@ void __init timekeeping_init(void)
                clock->enable(clock);
        tk_setup_internals(tk, clock);
 
-       tk_set_xtime(tk, &now);
+       tk_set_xtime(tk, &wall_time);
        tk->raw_sec = 0;
-       if (boot.tv_sec == 0 && boot.tv_nsec == 0)
-               boot = tk_xtime(tk);
 
-       set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
-       tk_set_wall_to_mono(tk, tmp);
+       tk_set_wall_to_mono(tk, wall_to_mono);
 
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
@@ -1577,7 +1602,7 @@ static struct timespec64 timekeeping_suspend_time;
  * adds the sleep offset to the timekeeping variables.
  */
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
-                                          struct timespec64 *delta)
+                                          const struct timespec64 *delta)
 {
        if (!timespec64_valid_strict(delta)) {
                printk_deferred(KERN_WARNING
@@ -1610,7 +1635,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
  */
 bool timekeeping_rtc_skipresume(void)
 {
-       return sleeptime_injected;
+       return !suspend_timing_needed;
 }
 
 /**
@@ -1638,7 +1663,7 @@ bool timekeeping_rtc_skipsuspend(void)
  * This function should only be called by rtc_resume(), and allows
  * a suspend offset to be injected into the timekeeping values.
  */
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
@@ -1646,6 +1671,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
 
+       suspend_timing_needed = false;
+
        timekeeping_forward_now(tk);
 
        __timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1696,9 @@ void timekeeping_resume(void)
        struct clocksource *clock = tk->tkr_mono.clock;
        unsigned long flags;
        struct timespec64 ts_new, ts_delta;
-       u64 cycle_now;
+       u64 cycle_now, nsec;
+       bool inject_sleeptime = false;
 
-       sleeptime_injected = false;
        read_persistent_clock64(&ts_new);
 
        clockevents_resume();
@@ -1693,22 +1720,19 @@ void timekeeping_resume(void)
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = tk_clock_read(&tk->tkr_mono);
-       if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-               cycle_now > tk->tkr_mono.cycle_last) {
-               u64 nsec, cyc_delta;
-
-               cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
-                                             tk->tkr_mono.mask);
-               nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+       nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+       if (nsec > 0) {
                ts_delta = ns_to_timespec64(nsec);
-               sleeptime_injected = true;
+               inject_sleeptime = true;
        } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-               sleeptime_injected = true;
+               inject_sleeptime = true;
        }
 
-       if (sleeptime_injected)
+       if (inject_sleeptime) {
+               suspend_timing_needed = false;
                __timekeeping_inject_sleeptime(tk, &ts_delta);
+       }
 
        /* Re-base the last cycle value */
        tk->tkr_mono.cycle_last = cycle_now;
@@ -1732,6 +1756,8 @@ int timekeeping_suspend(void)
        unsigned long flags;
        struct timespec64               delta, delta_delta;
        static struct timespec64        old_delta;
+       struct clocksource *curr_clock;
+       u64 cycle_now;
 
        read_persistent_clock64(&timekeeping_suspend_time);
 
@@ -1743,11 +1769,22 @@ int timekeeping_suspend(void)
        if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
                persistent_clock_exists = true;
 
+       suspend_timing_needed = true;
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;
 
+       /*
+        * Since we've called forward_now, cycle_last stores the value
+        * just read from the current clocksource. Save this to potentially
+        * use in suspend timing.
+        */
+       curr_clock = tk->tkr_mono.clock;
+       cycle_now = tk->tkr_mono.cycle_last;
+       clocksource_start_suspend_timing(curr_clock, cycle_now);
+
        if (persistent_clock_exists) {
                /*
                 * To avoid drift caused by repeated suspend/resumes,
@@ -2021,11 +2058,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
        return offset;
 }
 
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
  */
-void update_wall_time(void)
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
 {
        struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2079,17 @@ void update_wall_time(void)
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
+
+       if (mode != TK_ADV_TICK)
+               goto out;
 #else
        offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
 
        /* Check if there's really nothing to do */
-       if (offset < real_tk->cycle_interval)
+       if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
                goto out;
+#endif
 
        /* Do some additional sanity checking */
        timekeeping_check_update(tk, offset);
@@ -2105,6 +2145,15 @@ out:
                clock_was_set_delayed();
 }
 
+/**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+       timekeeping_advance(TK_ADV_TICK);
+}
+
 /**
  * getboottime64 - Return the real time of system boot.
  * @ts:                pointer to the timespec64 to be set
@@ -2220,7 +2269,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 /**
  * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
  */
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
 {
        if (txc->modes & ADJ_ADJTIME) {
                /* singleshot must not be used with any other mode bits */
@@ -2310,7 +2359,7 @@ int do_adjtimex(struct timex *txc)
                        return ret;
        }
 
-       getnstimeofday64(&ts);
+       ktime_get_real_ts64(&ts);
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2376,10 @@ int do_adjtimex(struct timex *txc)
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+       /* Update the multiplier immediately if frequency was set directly */
+       if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+               timekeeping_advance(TK_ADV_FREQ);
+
        if (tai != orig_tai)
                clock_was_set();
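The wall_to_mono invariant set up in timekeeping_init() above can be hard to see in diff form; a minimal sketch with made-up values (only timespec64_sub() is the real kernel helper) illustrates it:

	/* Illustrative only: wall time + wall_to_mono == boot (monotonic) time */
	struct timespec64 wall_time   = { .tv_sec = 1000 }; /* persistent clock */
	struct timespec64 boot_offset = { .tv_sec = 30 };   /* time spent before the kernel ran */
	struct timespec64 wall_to_mono = timespec64_sub(boot_offset, wall_time);
	/* wall_to_mono == -970s: an xtime of 1000s reads as 30s on CLOCK_MONOTONIC */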
 
index 0754cadfa9e61b044b6bfcf81f4146c8c45f63c3..238e4be6022955f5b11cefae73b236cb8ba2d39a 100644 (file)
@@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void)
 }
 late_initcall(tk_debug_sleep_time_init);
 
-void tk_debug_account_sleep_time(struct timespec64 *t)
+void tk_debug_account_sleep_time(const struct timespec64 *t)
 {
        /* Cap bin index so we don't overflow the array */
        int bin = min(fls(t->tv_sec), NUM_BINS-1);
index cf5c0828ee3157bf620561f39838edd6c144e140..bcbb52db22565971d1a4885bd8e177c21265e6a5 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec64 *t);
+extern void tk_debug_account_sleep_time(const struct timespec64 *t);
 #else
 #define tk_debug_account_sleep_time(x)
 #endif
index cc2d23e6ff6162ccb8101705d2bf5cb4d4554165..fa49cd753dea74ff442e49a637895ca71993703d 100644 (file)
@@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
         * wheel:
         */
        base->next_expiry = timer->expires;
-               wake_up_nohz_cpu(base->cpu);
+       wake_up_nohz_cpu(base->cpu);
 }
 
 static void
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
 
        raw_spin_lock_irq(&base->lock);
 
+       /*
+        * timer_base::must_forward_clk must be cleared before running
+        * timers so that any timer functions that call mod_timer() will
+        * not try to forward the base. Idle tracking / clock forwarding
+        * logic is only used with BASE_STD timers.
+        *
+        * The must_forward_clk flag is cleared unconditionally also for
+        * the deferrable base. The deferrable base is not affected by idle
+        * tracking and never forwarded, so clearing the flag is a NOOP.
+        *
+        * The fact that the deferrable base is never forwarded can cause
+        * large variations in granularity for deferrable timers, but they
+        * can be deferred for long periods due to idle anyway.
+        */
+       base->must_forward_clk = false;
+
        while (time_after_eq(jiffies, base->clk)) {
 
                levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-       /*
-        * must_forward_clk must be cleared before running timers so that any
-        * timer functions that call mod_timer will not try to forward the
-        * base. idle trcking / clock forwarding logic is only used with
-        * BASE_STD timers.
-        *
-        * The deferrable base does not do idle tracking at all, so we do
-        * not forward it. This can result in very large variations in
-        * granularity for deferrable timers, but they can be deferred for
-        * long periods due to idle.
-        */
-       base->must_forward_clk = false;
-
        __run_timers(base);
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
index 3de1efbecd6a3cad4c47b1e09e649969f5ea3b0d..1ac24a826589353396f02c98150049d134e6e7dd 100644 (file)
@@ -20,6 +20,9 @@
  * Author: Paul E. McKenney <paulmck@us.ibm.com>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
 
 static char *torture_type;
-static bool verbose;
+static int verbose;
 
 /* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
 #define FULLSTOP_DONTSTOP 0    /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
        if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: offlining %d\n",
                         torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
                                 "torture_onoff task: offline %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: offlined %d\n",
                                 torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
        if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: onlining %d\n",
                         torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
                                 "torture_onoff task: online %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: onlined %d\n",
                                 torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
  * The runnable parameter points to a flag that controls whether or not
  * the test is currently runnable.  If there is no such flag, pass in NULL.
  */
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
 {
        mutex_lock(&fullstop_mutex);
        if (torture_type != NULL) {
index efed9c1cfb7ea4ea12182e711dacf01623f73452..caf9cbf3581683ace69577fd2f365ba039138cea 100644 (file)
@@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        op->saved_func(ip, parent_ip, op, regs);
 }
 
-/**
- * clear_ftrace_function - reset the ftrace function
- *
- * This NULLs the ftrace function and in essence stops
- * tracing.  There may be lag
- */
-void clear_ftrace_function(void)
-{
-       ftrace_trace_function = ftrace_stub;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -6689,7 +6678,7 @@ void ftrace_kill(void)
 {
        ftrace_disabled = 1;
        ftrace_enabled = 0;
-       clear_ftrace_function();
+       ftrace_trace_function = ftrace_stub;
 }
 
 /**
index 6a46af21765cc60685e54297e8092ee1abf9aca3..0b0b688ea166f29bd73b22ef302f3cb3776ea424 100644 (file)
@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
        return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer whose writable setting is checked
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+       return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
index c9336e98ac59a778d31c16a9ac72b184477e7177..823687997b015f5101375fc2aa0020c5c4415c02 100644 (file)
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf;
-
        if (tr->stop_count)
                return;
 
@@ -1375,9 +1373,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
-       buf = tr->trace_buffer.buffer;
-       tr->trace_buffer.buffer = tr->max_buffer.buffer;
-       tr->max_buffer.buffer = buf;
+       /* Inherit the recordable setting from trace_buffer */
+       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+               ring_buffer_record_on(tr->max_buffer.buffer);
+       else
+               ring_buffer_record_off(tr->max_buffer.buffer);
+
+       swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
@@ -2957,6 +2959,7 @@ out_nobuffer:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
+__printf(3, 0)
 static int
 __trace_array_vprintk(struct ring_buffer *buffer,
                      unsigned long ip, const char *fmt, va_list args)
@@ -3011,12 +3014,14 @@ out_nobuffer:
        return len;
 }
 
+__printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
        return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
 }
 
+__printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
                       unsigned long ip, const char *fmt, ...)
 {
@@ -3032,6 +3037,7 @@ int trace_array_printk(struct trace_array *tr,
        return ret;
 }
 
+__printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
                           unsigned long ip, const char *fmt, ...)
 {
@@ -3047,6 +3053,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
        return ret;
 }
 
+__printf(2, 0)
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
        return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -3364,8 +3371,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
 
        print_event_info(buf, m);
 
-       seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
-       seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
+       seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
+       seq_printf(m, "#              | |     %s    |       |         |\n",      tgid ? "  |      " : "");
 }
 
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
@@ -3385,9 +3392,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
                   tgid ? tgid_space : space);
        seq_printf(m, "#                          %s||| /     delay\n",
                   tgid ? tgid_space : space);
-       seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
+       seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",
                   tgid ? "   TGID   " : space);
-       seq_printf(m, "#              | |       | %s||||       |         |\n",
+       seq_printf(m, "#              | |   %s  |   ||||       |         |\n",
                   tgid ? "     |    " : space);
 }
 
index 630c5a24b2b255bf7cd35ef1524e25508835679f..f8f86231ad90e48b73b7eb2dc8d1def48071219a 100644 (file)
@@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit)
 static inline struct ring_buffer_iter *
 trace_buffer_iter(struct trace_iterator *iter, int cpu)
 {
-       if (iter->buffer_iter && iter->buffer_iter[cpu])
-               return iter->buffer_iter[cpu];
-       return NULL;
+       return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
 }
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
index e1c818dbc0d724c603be39463b83de8f021cf79f..893a206bcba4f76dd504e246d9667475fd7ed456 100644 (file)
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
        C(TOO_MANY_PREDS,       "Too many terms in predicate expression"), \
        C(INVALID_FILTER,       "Meaningless filter expression"),       \
        C(IP_FIELD_ONLY,        "Only 'ip' field is supported for function trace"), \
-       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"),
+       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"), \
+       C(NO_FILTER,            "No filter found"),
 
 #undef C
 #define C(a, b)                FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                goto out_free;
        }
 
+       if (!N) {
+               /* No program? */
+               ret = -EINVAL;
+               parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+               goto out_free;
+       }
+
        prog[N].pred = NULL;                                    /* #13 */
        prog[N].target = 1;             /* TRUE */
        prog[N+1].pred = NULL;
@@ -1693,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe)
  * @filter_str: filter string
  * @set_str: remember @filter_str and enable detailed error in filter
  * @filterp: out param for created filter (always updated on return)
+ *           Must be a pointer that references a NULL pointer.
  *
  * Creates a filter for @call with @filter_str.  If @set_str is %true,
  * @filter_str is copied and recorded in the new filter.
@@ -1710,6 +1719,10 @@ static int create_filter(struct trace_event_call *call,
        struct filter_parse_error *pe = NULL;
        int err;
 
+       /* filterp must point to NULL */
+       if (WARN_ON(*filterp))
+               *filterp = NULL;
+
        err = create_filter_start(filter_string, set_str, &pe, filterp);
        if (err)
                return err;
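The new WARN above makes the calling convention explicit; a conforming caller looks roughly like this (call and filter_str are assumed to be in scope):

	struct event_filter *filter = NULL;	/* filterp must reference NULL */
	int err;

	err = create_filter(call, filter_str, true, &filter);
	/* *filterp is always updated on return, even on error, so the
	 * result must go through the normal filter teardown path. */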
index 046c716a6536ba4f504df2523a420b92920d4e52..aae18af94c94e61967063ac7534fe2979fdcec45 100644 (file)
@@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var)
        else if (system)
                snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
        else
-               strncpy(err, var, MAX_FILTER_STR_VAL);
+               strscpy(err, var, MAX_FILTER_STR_VAL);
 
        hist_err(str, err);
 }
index d18249683682f750a1b86387355e9b159bb1b3d5..5dea177cef53120129c7d4a4d76eacb67ad66db0 100644 (file)
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
                goto out_free;
 
  out_reg:
+       /* Up the trigger_data count to make sure reg doesn't free it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
        ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
        /*
         * The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
         * Consider no functions a failure too.
         */
        if (!ret) {
+               cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
                ret = -ENOENT;
-               goto out_free;
-       } else if (ret < 0)
-               goto out_free;
-       ret = 0;
+       } else if (ret > 0)
+               ret = 0;
+
+       /* Down the counter of trigger_data or free it if not used anymore */
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
 
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out;
        }
 
+       /* Up the trigger_data count to make sure nothing frees it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
+
        if (trigger) {
                number = strsep(&trigger, ":");
 
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
 
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
  out_free:
        if (cmd_ops->set_filter)
                cmd_ops->set_filter(NULL, trigger_data, NULL);
-       kfree(trigger_data);
+       event_trigger_free(trigger_ops, trigger_data);
        kfree(enable_data);
        goto out;
 }
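Condensed, the reference-count guard these hunks introduce has this shape (a sketch of the pattern, not a further change):

	event_trigger_init(trigger_ops, trigger_data);	/* +1: reg() may drop a ref on failure */
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	if (!ret)					/* zero matches counts as failure */
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
	event_trigger_free(trigger_ops, trigger_data);	/* -1: frees trigger_data only if now unused */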
index 23c0b0cb5fb95c9875fb35cbd0d22f027430343c..169b3c44ee97f3cf00bc574b185f16fa572a12d5 100644 (file)
@@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
+       int cpu = iter->cpu;
        int i;
 
        graph_ret = &ret_entry->ret;
@@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        if (data) {
                struct fgraph_cpu_data *cpu_data;
-               int cpu = iter->cpu;
 
                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
@@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        trace_seq_printf(s, "%ps();\n", (void *)call->func);
 
+       print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+                       cpu, iter->ent->pid, flags);
+
        return trace_handle_return(s);
 }
 
index daa81571b22a4646bcc6400ccee0fe638dda2515..e9d99463e5dffe82bcc42f0ea75392d0528ce2fe 100644 (file)
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
+       struct event_file_link *link = NULL;
        int ret = 0;
 
        if (file) {
-               struct event_file_link *link;
-
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }
+
+       if (ret) {
+               if (file) {
+                       /* The branch is taken only when WARN_ON_ONCE() does not fire */
+                       if (!WARN_ON_ONCE(!link))
+                               list_del_rcu(&link->list);
+                       kfree(link);
+                       tk->tp.flags &= ~TP_FLAG_TRACE;
+               } else {
+                       tk->tp.flags &= ~TP_FLAG_PROFILE;
+               }
+       }
  out:
        return ret;
 }
@@ -1217,16 +1228,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
                /*
                 * We need to check and see if we modified the pc of the
-                * pt_regs, and if so clear the kprobe and return 1 so that we
-                * don't do the single stepping.
-                * The ftrace kprobe handler leaves it up to us to re-enable
-                * preemption here before returning if we've modified the ip.
+                * pt_regs, and if so return 1 so that we don't do the
+                * single stepping.
                 */
-               if (orig_ip != instruction_pointer(regs)) {
-                       reset_current_kprobe();
-                       preempt_enable_no_resched();
+               if (orig_ip != instruction_pointer(regs))
                        return 1;
-               }
                if (!ret)
                        return 0;
        }
@@ -1480,8 +1486,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
        }
 
        ret = __register_trace_kprobe(tk);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(tk->tp.call.print_fmt);
                goto error;
+       }
 
        return &tk->tp.call;
 error:
@@ -1501,6 +1509,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
        }
 
        __unregister_trace_kprobe(tk);
+
+       kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
 }
 #endif /* CONFIG_PERF_EVENTS */
index 90db994ac9004d2fc7163eeb95b8c79e121e991a..1c8e30fda46a8a4abab5c748868e52c06a8b30ea 100644 (file)
@@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter)
 
        trace_find_cmdline(entry->pid, comm);
 
-       trace_seq_printf(s, "%16s-%-5d [%03d] ",
-                              comm, entry->pid, iter->cpu);
+       trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 
        if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
                unsigned int tgid = trace_find_tgid(entry->pid);
@@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter)
                        trace_seq_printf(s, "(%5d) ", tgid);
        }
 
+       trace_seq_printf(s, "[%03d] ", iter->cpu);
+
        if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
                trace_print_lat_fmt(s, entry);
 
index 576d1804581150d1479730166910ff6fb0a89d2e..5470dce212c0dbc9861d5d2108b926a473f89de4 100644 (file)
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
-#include <linux/smpboot.h>
-#include <linux/sched/rt.h>
-#include <uapi/linux/sched/types.h>
 #include <linux/tick.h>
-#include <linux/workqueue.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/isolation.h>
+#include <linux/stop_machine.h>
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
-#include <linux/kthread.h>
 
 static DEFINE_MUTEX(watchdog_mutex);
 
@@ -169,11 +165,10 @@ static void lockup_detector_update_enable(void)
 unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
-static bool softlockup_threads_initialized __read_mostly;
+static bool softlockup_initialized __read_mostly;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
-static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
@@ -335,6 +330,27 @@ static void watchdog_interrupt_count(void)
        __this_cpu_inc(hrtimer_interrupts);
 }
 
+static DEFINE_PER_CPU(struct completion, softlockup_completion);
+static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
+
+/*
+ * The watchdog feed function - touches the timestamp.
+ *
+ * It only runs once every sample_period seconds (4 seconds by
+ * default) to reset the softlockup timestamp. If this gets delayed
+ * for more than 2*watchdog_thresh seconds then the debug-printout
+ * triggers in watchdog_timer_fn().
+ */
+static int softlockup_fn(void *data)
+{
+       __this_cpu_write(soft_lockup_hrtimer_cnt,
+                        __this_cpu_read(hrtimer_interrupts));
+       __touch_watchdog();
+       complete(this_cpu_ptr(&softlockup_completion));
+
+       return 0;
+}
+
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
@@ -350,7 +366,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        watchdog_interrupt_count();
 
        /* kick the softlockup detector */
-       wake_up_process(__this_cpu_read(softlockup_watchdog));
+       if (completion_done(this_cpu_ptr(&softlockup_completion))) {
+               reinit_completion(this_cpu_ptr(&softlockup_completion));
+               stop_one_cpu_nowait(smp_processor_id(),
+                               softlockup_fn, NULL,
+                               this_cpu_ptr(&softlockup_stop_work));
+       }
 
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
@@ -448,16 +469,15 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        return HRTIMER_RESTART;
 }
 
-static void watchdog_set_prio(unsigned int policy, unsigned int prio)
-{
-       struct sched_param param = { .sched_priority = prio };
-
-       sched_setscheduler(current, policy, &param);
-}
-
 static void watchdog_enable(unsigned int cpu)
 {
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+       struct completion *done = this_cpu_ptr(&softlockup_completion);
+
+       WARN_ON_ONCE(cpu != smp_processor_id());
+
+       init_completion(done);
+       complete(done);
 
        /*
         * Start the timer first to prevent the NMI watchdog triggering
@@ -473,15 +493,14 @@ static void watchdog_enable(unsigned int cpu)
        /* Enable the perf event */
        if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
                watchdog_nmi_enable(cpu);
-
-       watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 }
 
 static void watchdog_disable(unsigned int cpu)
 {
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
-       watchdog_set_prio(SCHED_NORMAL, 0);
+       WARN_ON_ONCE(cpu != smp_processor_id());
+
        /*
         * Disable the perf event first. That prevents that a large delay
         * between disabling the timer and disabling the perf event causes
@@ -489,79 +508,66 @@ static void watchdog_disable(unsigned int cpu)
         */
        watchdog_nmi_disable(cpu);
        hrtimer_cancel(hrtimer);
+       wait_for_completion(this_cpu_ptr(&softlockup_completion));
 }
 
-static void watchdog_cleanup(unsigned int cpu, bool online)
+static int softlockup_stop_fn(void *data)
 {
-       watchdog_disable(cpu);
+       watchdog_disable(smp_processor_id());
+       return 0;
 }
 
-static int watchdog_should_run(unsigned int cpu)
+static void softlockup_stop_all(void)
 {
-       return __this_cpu_read(hrtimer_interrupts) !=
-               __this_cpu_read(soft_lockup_hrtimer_cnt);
+       int cpu;
+
+       if (!softlockup_initialized)
+               return;
+
+       for_each_cpu(cpu, &watchdog_allowed_mask)
+               smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
+
+       cpumask_clear(&watchdog_allowed_mask);
 }
 
-/*
- * The watchdog thread function - touches the timestamp.
- *
- * It only runs once every sample_period seconds (4 seconds by
- * default) to reset the softlockup timestamp. If this gets delayed
- * for more than 2*watchdog_thresh seconds then the debug-printout
- * triggers in watchdog_timer_fn().
- */
-static void watchdog(unsigned int cpu)
+static int softlockup_start_fn(void *data)
 {
-       __this_cpu_write(soft_lockup_hrtimer_cnt,
-                        __this_cpu_read(hrtimer_interrupts));
-       __touch_watchdog();
+       watchdog_enable(smp_processor_id());
+       return 0;
 }
 
-static struct smp_hotplug_thread watchdog_threads = {
-       .store                  = &softlockup_watchdog,
-       .thread_should_run      = watchdog_should_run,
-       .thread_fn              = watchdog,
-       .thread_comm            = "watchdog/%u",
-       .setup                  = watchdog_enable,
-       .cleanup                = watchdog_cleanup,
-       .park                   = watchdog_disable,
-       .unpark                 = watchdog_enable,
-};
-
-static void softlockup_update_smpboot_threads(void)
+static void softlockup_start_all(void)
 {
-       lockdep_assert_held(&watchdog_mutex);
-
-       if (!softlockup_threads_initialized)
-               return;
+       int cpu;
 
-       smpboot_update_cpumask_percpu_thread(&watchdog_threads,
-                                            &watchdog_allowed_mask);
+       cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+       for_each_cpu(cpu, &watchdog_allowed_mask)
+               smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
 }
 
-/* Temporarily park all watchdog threads */
-static void softlockup_park_all_threads(void)
+int lockup_detector_online_cpu(unsigned int cpu)
 {
-       cpumask_clear(&watchdog_allowed_mask);
-       softlockup_update_smpboot_threads();
+       watchdog_enable(cpu);
+       return 0;
 }
 
-/* Unpark enabled threads */
-static void softlockup_unpark_threads(void)
+int lockup_detector_offline_cpu(unsigned int cpu)
 {
-       cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
-       softlockup_update_smpboot_threads();
+       watchdog_disable(cpu);
+       return 0;
 }
 
 static void lockup_detector_reconfigure(void)
 {
        cpus_read_lock();
        watchdog_nmi_stop();
-       softlockup_park_all_threads();
+
+       softlockup_stop_all();
        set_sample_period();
        lockup_detector_update_enable();
        if (watchdog_enabled && watchdog_thresh)
-               softlockup_unpark_threads();
+               softlockup_start_all();
+
        watchdog_nmi_start();
        cpus_read_unlock();
        /*
@@ -580,8 +586,6 @@ static void lockup_detector_reconfigure(void)
  */
 static __init void lockup_detector_setup(void)
 {
-       int ret;
-
        /*
         * If sysctl is off and watchdog got disabled on the command line,
         * nothing to do here.
@@ -592,24 +596,13 @@ static __init void lockup_detector_setup(void)
            !(watchdog_enabled && watchdog_thresh))
                return;
 
-       ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
-                                                    &watchdog_allowed_mask);
-       if (ret) {
-               pr_err("Failed to initialize soft lockup detector threads\n");
-               return;
-       }
-
        mutex_lock(&watchdog_mutex);
-       softlockup_threads_initialized = true;
        lockup_detector_reconfigure();
+       softlockup_initialized = true;
        mutex_unlock(&watchdog_mutex);
 }
 
 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static inline int watchdog_park_threads(void) { return 0; }
-static inline void watchdog_unpark_threads(void) { }
-static inline int watchdog_enable_all_cpus(void) { return 0; }
-static inline void watchdog_disable_all_cpus(void) { }
 static void lockup_detector_reconfigure(void)
 {
        cpus_read_lock();
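Reduced to its essentials, the handshake between the hrtimer (producer) and teardown now looks like this (a sketch reusing the symbols added above, not additional kernel code):

	/* hrtimer side: queue at most one softlockup_fn() run per CPU */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(), softlockup_fn, NULL,
				    this_cpu_ptr(&softlockup_stop_work));
	}

	/* teardown side: after hrtimer_cancel(), wait for any in-flight run
	 * so the stop work cannot fire once the watchdog is disabled */
	wait_for_completion(this_cpu_ptr(&softlockup_completion));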
index e449a23e9d5982e9069849a5f370c454dfe35383..1f7020d65d0aa2963708d3e44057e08ce2305ead 100644 (file)
@@ -175,8 +175,8 @@ static int hardlockup_detector_event_create(void)
        evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                               watchdog_overflow_callback, NULL);
        if (IS_ERR(evt)) {
-               pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
-                       PTR_ERR(evt));
+               pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
+                        PTR_ERR(evt));
                return PTR_ERR(evt);
        }
        this_cpu_write(watchdog_ev, evt);
index e34b04b56057a86cd0ade5cb9fcb4919730f80a2..706836ec314d2add83b84ebe59dec8fd7e13a7ad 100644 (file)
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
-config HAS_DMA
-       bool
-       depends on !NO_DMA
-       default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
        bool
        default n
 
-config NEED_SG_DMA_LENGTH
-       bool
-
-config NEED_DMA_MAP_STATE
-       bool
-
-config ARCH_DMA_ADDR_T_64BIT
-       def_bool 64BIT || PHYS_ADDR_T_64BIT
-
 config IOMMU_HELPER
        bool
 
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-       bool
-       select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-       bool
-       depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-       bool
-       depends on HAS_DMA
-
-config SWIOTLB
-       bool
-       select DMA_DIRECT_OPS
-       select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
        bool
 
index 8838d1158d192bfafe61fc234480ad9c9a05971f..0b066b3c9284c8269cdbc61d914c48c8240d2f17 100644 (file)
@@ -1718,7 +1718,7 @@ config KPROBES_SANITY_TEST
        default n
        help
          This option provides for testing basic kprobes functionality on
-         boot. A sample kprobe, jprobe and kretprobe are inserted and
+         boot. Samples of kprobe and kretprobe are inserted and
          verified for functionality.
 
          Say N if you are unsure.
index 3d35d062970d2459ecee5573cf512a999061b3ab..befb127507c0b1cb05f6f83b81464e99f4ac4fb0 100644 (file)
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
 
 config KASAN
        bool "KASan: runtime memory debugger"
-       depends on SLUB || (SLAB && !DEBUG_SLAB)
+       depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+       select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
index 19d42ea75ec225d385734e5212680df0ef9609c6..98fa559ebd808698ecc3774bf88c5bdea1988f27 100644 (file)
@@ -1,9 +1,6 @@
 config ARCH_HAS_UBSAN_SANITIZE_ALL
        bool
 
-config ARCH_WANTS_UBSAN_NO_NULL
-       def_bool n
-
 config UBSAN
        bool "Undefined behaviour sanity checker"
        help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
          Enabling this option on architectures that support unaligned
          accesses may produce a lot of false positives.
 
-config UBSAN_NULL
-       bool "Enable checking of null pointers"
-       depends on UBSAN
-       default y if !ARCH_WANTS_UBSAN_NO_NULL
-       help
-         This option enables detection of memory accesses via a
-         null pointer.
-
 config TEST_UBSAN
        tristate "Module for testing for undefined behavior detection"
        depends on m && UBSAN
index 956b320292fef9a4055a1a955f37b6f41c2a4b71..90dc5520b7849dc69dc4c3df3ea419c45e9451cc 100644 (file)
@@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o chacha20.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o siphash.o \
+        earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
         nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-  lib-y += dec_and_lock.o
-endif
-
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_RATIONAL) += rational.o
 obj-$(CONFIG_CRC_CCITT)        += crc-ccitt.o
@@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
index 53c2d5edc826d4dd322e1623c3aa04297b762a2b..1d91e31eceec480e4945c9fd7152f61ebab701e5 100644 (file)
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
 {
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
-       int ret = 0;
+       long long val;
 
        raw_spin_lock_irqsave(lock, flags);
-       if (v->counter != u) {
+       val = v->counter;
+       if (val != u)
                v->counter += a;
-               ret = 1;
-       }
        raw_spin_unlock_irqrestore(lock, flags);
-       return ret;
+
+       return val;
 }
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
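With the fetch variant in place, the old boolean semantics fall out as a trivial wrapper; a wrapper along these lines is what the generic atomic headers provide (shape illustrative):

	static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
	{
		/* the add happened iff the old value was not u */
		return atomic64_fetch_add_unless(v, a, u) != u;
	}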
index 994be4805ceca93dd417baf2289eff8f5446c4f5..70935ed9112599c3517829c0f6dea0de6d2435e4 100644 (file)
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
 
        limit++;
        if (is_on_stack)
-               pr_warn("object is on stack, but not annotated\n");
+               pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+                        task_stack_page(current));
        else
-               pr_warn("object is not on stack, but annotated\n");
+               pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+                        task_stack_page(current));
+
        WARN_ON(1);
 }
 
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
 
        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
-               if (obj_cache)
-                       kmem_cache_destroy(obj_cache);
+               kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();
index 347fa7ac2e8a858827415d44725d256d2a9e96a3..9555b68bb774cc3277dca434d19880286d71df0e 100644 (file)
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                unsigned long *flags)
+{
+       /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+       if (atomic_add_unless(atomic, -1, 1))
+               return 0;
+
+       /* Otherwise do it the slow way */
+       spin_lock_irqsave(lock, *flags);
+       if (atomic_dec_and_test(atomic))
+               return 1;
+       spin_unlock_irqrestore(lock, *flags);
+       return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
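A sketch of the intended call site for the new helper (obj and its fields are illustrative):

	unsigned long flags;

	if (_atomic_dec_and_lock_irqsave(&obj->refcnt, &obj->lock, &flags)) {
		/* count hit zero: lock held, interrupts disabled */
		list_del(&obj->node);
		spin_unlock_irqrestore(&obj->lock, flags);
		kfree(obj);
	}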
index 54e5bbaa3200317534926e65982dcd3cbab71492..517f5853ffed1726a462543f902450ddb7f9a9b4 100644 (file)
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                if (ioremap_pmd_enabled() &&
                    ((next - addr) == PMD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
-                   pmd_free_pte_page(pmd)) {
+                   pmd_free_pte_page(pmd, addr)) {
                        if (pmd_set_huge(pmd, phys_addr + addr, prot))
                                continue;
                }
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                if (ioremap_pud_enabled() &&
                    ((next - addr) == PUD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
-                   pud_free_pmd_page(pud)) {
+                   pud_free_pmd_page(pud, addr)) {
                        if (pud_set_huge(pud, phys_addr + addr, prot))
                                continue;
                }
index 7e43cd54c84ca3da2d77b02e7112c69386428a2b..8be175df30753c95692007a5d41503838344d9a5 100644 (file)
@@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
        return ret;
 }
 
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+                               struct iov_iter *i)
+{
+       struct pipe_inode_info *pipe = i->pipe;
+       size_t n, off, xfer = 0;
+       int idx;
+
+       if (!sanity(i))
+               return 0;
+
+       bytes = n = push_pipe(i, bytes, &idx, &off);
+       if (unlikely(!n))
+               return 0;
+       for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+               size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+               unsigned long rem;
+
+               rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+                               chunk);
+               i->idx = idx;
+               i->iov_offset = off + chunk - rem;
+               xfer += chunk - rem;
+               if (rem)
+                       break;
+               n -= chunk;
+               addr += chunk;
+       }
+       i->count -= xfer;
+       return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and a typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ *   byte-by-byte until the fault happens again. Re-triggering machine
+ *   checks is potentially fatal so the implementation uses source
+ *   alignment and poison alignment assumptions to avoid re-triggering
+ *   hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ *   a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
 size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 {
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-       if (unlikely(i->type & ITER_PIPE)) {
-               WARN_ON(1);
-               return 0;
-       }
+       if (unlikely(i->type & ITER_PIPE))
+               return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
@@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL(_copy_from_iter_nocache);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that it guarantees all data is flushed
+ * for all iterator types. _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
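Tying the _copy_to_iter_mcsafe() kernel-doc above back to a caller: a short return means poison was consumed, so a consumer (illustrative pmem-style sketch) should surface a partial read rather than retry:

	ssize_t ret;
	size_t copied = _copy_to_iter_mcsafe(kaddr, len, iter);

	/* Any iterator type may return short here and the iterator has
	 * already advanced by 'copied' bytes; retrying the tail byte-by-byte
	 * could re-trigger the machine check. */
	ret = copied ? (ssize_t)copied : -EIO;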
index 9bbd9c5d375a2c8bf9a6d950ba42ab556c12063b..beb14839b41ae3c04fd698ec33a34727a2bc92d5 100644 (file)
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
        spin_lock_irqsave(&tags->lock, flags);
 
        /* Fastpath */
-       if (likely(tags->nr_free >= 0)) {
+       if (likely(tags->nr_free)) {
                tag = tags->freelist[--tags->nr_free];
                spin_unlock_irqrestore(&tags->lock, flags);
                return tag;
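The one-character change above matters because nr_free is an unsigned count; a standalone illustration of the bug class (not kernel code):

	unsigned int nr_free = 0;

	if (nr_free >= 0)	/* always true for an unsigned type */
		;		/* old fastpath: freelist[--nr_free] would
				 * index freelist[UINT_MAX] when empty */
	if (nr_free)		/* fixed test: only taken when entries remain */
		;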
index 140fa8bb5c2352daccbd51203fe3517cc02ee07f..914ebe98fc211df0871e9f16425f1d4a29f0057e 100644 (file)
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
        asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
 }
 
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
 {
-       typedef struct { u8 _[16*n]; } addrtype;
+       typedef struct { u8 _[16 * $#]; } addrtype;
        register addrtype *__ptr asm("1") = (addrtype *) ptr;
 
        asm volatile ("VLM %2,%3,0,%r1"
-                     : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+                     : : "m" (*__ptr), "a" (__ptr), "i" (x),
+                         "i" (x + $# - 1));
 }
 
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
 {
-       typedef struct { u8 _[16*n]; } addrtype;
+       typedef struct { u8 _[16 * $#]; } addrtype;
        register addrtype *__ptr asm("1") = (addrtype *) ptr;
 
        asm volatile ("VSTM %2,%3,0,1"
-                     : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+                     : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+                       "i" (x + $# - 1));
 }
 
 static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
        q = dptr[z0 + 2];       /* RS syndrome */
 
        for (d = 0; d < bytes; d += $#*NSIZE) {
-               LOAD_DATA(0,$#,&dptr[z0][d]);
+               LOAD_DATA(0,&dptr[z0][d]);
                COPY_VEC(8+$$,0+$$);
                for (z = z0 - 1; z >= 0; z--) {
                        MASK(16+$$,8+$$);
                        AND(16+$$,16+$$,25);
                        SHLBYTE(8+$$,8+$$);
                        XOR(8+$$,8+$$,16+$$);
-                       LOAD_DATA(16,$#,&dptr[z][d]);
+                       LOAD_DATA(16,&dptr[z][d]);
                        XOR(0+$$,0+$$,16+$$);
                        XOR(8+$$,8+$$,16+$$);
                }
-               STORE_DATA(0,$#,&p[d]);
-               STORE_DATA(8,$#,&q[d]);
+               STORE_DATA(0,&p[d]);
+               STORE_DATA(8,&q[d]);
        }
        kernel_fpu_end(&vxstate, KERNEL_VXR);
 }
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
 
        for (d = 0; d < bytes; d += $#*NSIZE) {
                /* P/Q data pages */
-               LOAD_DATA(0,$#,&dptr[z0][d]);
+               LOAD_DATA(0,&dptr[z0][d]);
                COPY_VEC(8+$$,0+$$);
                for (z = z0 - 1; z >= start; z--) {
                        MASK(16+$$,8+$$);
                        AND(16+$$,16+$$,25);
                        SHLBYTE(8+$$,8+$$);
                        XOR(8+$$,8+$$,16+$$);
-                       LOAD_DATA(16,$#,&dptr[z][d]);
+                       LOAD_DATA(16,&dptr[z][d]);
                        XOR(0+$$,0+$$,16+$$);
                        XOR(8+$$,8+$$,16+$$);
                }
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
                        SHLBYTE(8+$$,8+$$);
                        XOR(8+$$,8+$$,16+$$);
                }
-               LOAD_DATA(16,$#,&p[d]);
+               LOAD_DATA(16,&p[d]);
                XOR(16+$$,16+$$,0+$$);
-               STORE_DATA(16,$#,&p[d]);
-               LOAD_DATA(16,$#,&q[d]);
+               STORE_DATA(16,&p[d]);
+               LOAD_DATA(16,&q[d]);
                XOR(16+$$,16+$$,8+$$);
-               STORE_DATA(16,$#,&q[d]);
+               STORE_DATA(16,&q[d]);
        }
        kernel_fpu_end(&vxstate, KERNEL_VXR);
 }
index 0eb48353abe30164d4ae564aa21bee901fad72c3..ebcf8cd49e05e3a93218ebe2ba728c63e02d77be 100644 (file)
  *
  */
 
+#include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/spinlock.h>
 #include <linux/bug.h>
 
-#ifdef CONFIG_REFCOUNT_FULL
-
 /**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
  * @r: the refcount
  *
@@ -58,7 +58,7 @@
  *
  * Return: false if the passed refcount is 0, true otherwise
  */
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
 {
        unsigned int new, val = atomic_read(&r->refs);
 
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 
        return true;
 }
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
 
 /**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
  * @i: the value to add to the refcount
  * @r: the refcount
  *
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
  * cases, refcount_inc(), or one of its variants, should instead be used to
  * increment a reference count.
  */
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
 {
-       WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+       WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
 
 /**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
  * @r: the refcount to increment
  *
  * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
  *
  * Return: true if the increment was successful, false otherwise
  */
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
 {
        unsigned int new, val = atomic_read(&r->refs);
 
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
 
        return true;
 }
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
 
 /**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
  * @r: the refcount to increment
  *
  * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
  * Will WARN if the refcount is 0, as this represents a possible use-after-free
  * condition.
  */
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
 {
-       WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+       WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
 }
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
 
 /**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
  * @i: amount to subtract from the refcount
  * @r: the refcount
  *
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
 {
        unsigned int new, val = atomic_read(&r->refs);
 
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
        return !new;
 }
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
 
 /**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
  * @r: the refcount
  *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
 {
-       return refcount_sub_and_test(1, r);
+       return refcount_sub_and_test_checked(1, r);
 }
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
 
 /**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
  * @r: the refcount
  *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
 {
-       WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+       WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
 
 /**
  * refcount_dec_if_one - decrement a refcount if it is 1
@@ -350,3 +349,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+                                  unsigned long *flags)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       spin_lock_irqsave(lock, *flags);
+       if (!refcount_dec_and_test(r)) {
+               spin_unlock_irqrestore(lock, *flags);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
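For illustration, a minimal caller sketch (struct foo, foo_lock and the f->refs field are hypothetical, not part of this patch). The helper returns true only when the count actually dropped to zero, with the lock held and the IRQ state saved in *flags:

static void foo_put(struct foo *f)
{
	unsigned long flags;

	/* Fast path: not the last reference, the lock is never taken. */
	if (!refcount_dec_and_lock_irqsave(&f->refs, &foo_lock, &flags))
		return;

	/* Last reference: foo_lock is held and interrupts are disabled. */
	list_del(&f->node);
	spin_unlock_irqrestore(&foo_lock, flags);
	kfree(f);
}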
index 9427b5766134cb139ef385b27f92f6027fecceca..e5c8586cf7174cfe0526dc8fb3314676601c5e57 100644 (file)
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
                                skip++;
                                if (list == iter->list) {
                                        iter->p = p;
-                                       skip = skip;
+                                       iter->skip = skip;
                                        goto found;
                                }
                        }
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 
 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
-       return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-                  (unsigned long)params->min_size);
+       size_t retsize;
+
+       if (params->nelem_hint)
+               retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+                             (unsigned long)params->min_size);
+       else
+               retsize = max(HASH_DEFAULT_SIZE,
+                             (unsigned long)params->min_size);
+
+       return retsize;
 }
 
 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
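To make the new sizing concrete (assuming HASH_DEFAULT_SIZE is 64, its usual value in lib/rhashtable.c): with nelem_hint = 600 and min_size = 0 the initial table gets roundup_pow_of_two(600 * 4 / 3) = roundup_pow_of_two(800) = 1024 buckets, while with nelem_hint = 0 the function now falls back to max(64, min_size) instead of evaluating roundup_pow_of_two(0), whose result is undefined. This matters because, per the rhashtable_init() hunk below, rounded_hashtable_size() is now called unconditionally.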
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
        struct bucket_table *tbl;
        size_t size;
 
-       size = HASH_DEFAULT_SIZE;
-
        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
 
        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
 
-       if (params->nelem_hint)
-               size = rounded_hashtable_size(&ht->p);
+       size = rounded_hashtable_size(&ht->p);
 
        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
 {
-       struct bucket_table *tbl;
+       struct bucket_table *tbl, *next_tbl;
        unsigned int i;
 
        cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
+restart:
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                }
        }
 
+       next_tbl = rht_dereference(tbl->future_tbl, ht);
        bucket_table_free(tbl);
+       if (next_tbl) {
+               tbl = next_tbl;
+               goto restart;
+       }
        mutex_unlock(&ht->mutex);
 }
 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
index 1642fd507a960f5deb2b6d7366db83800a3e547b..7c6096a7170486449736d82a37fbd50326ac169e 100644 (file)
@@ -24,9 +24,6 @@
  **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
        for_each_sg(sgl, sg, nents, i)
                ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
-#endif
        return ret;
 }
 EXPORT_SYMBOL(sg_last);
index 60aedc87936106460e436fe66429d45a59f36060..08d3d59dca17343c1a91def02d0a7da931c1c0f0 100644 (file)
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                {
                        {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
                        { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
                },
                .fill_helper = bpf_fill_maxinsns6,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC | FLAG_NO_DATA,
+#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
+               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
+               .expected_errcode = -ENOTSUPP,
        },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index b2aa8f5148449de1557e3ee48feebb8f1cab3083..cea592f402ed029d6d5dd63addd2d2bc8a8391f1 100644 (file)
@@ -260,13 +260,6 @@ plain(void)
 {
        int err;
 
-       /*
-        * Make sure crng is ready. Otherwise we get "(ptrval)" instead
-        * of a hashed address when printing '%p' in plain_hash() and
-        * plain_format().
-        */
-       wait_for_random_bytes();
-
        err = plain_hash();
        if (err) {
                pr_warn("plain 'p' does not appear to be hashed\n");
index 347cc834c04a8cbc388af1b7594e4f09bdc68b41..2e5d3df0853d928021cba0e30c70ff682afeaf68 100644 (file)
@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
-               /*
-                * Wait for wb shutdown to finish if someone else is just
-                * running wb_shutdown(). Otherwise we could proceed to wb /
-                * bdi destruction before wb_shutdown() is finished.
-                */
-               wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
-       set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);
 
        cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
-       /*
-        * Make sure bit gets cleared after shutdown is finished. Matches with
-        * the barrier provided by test_and_clear_bit() above.
-        */
-       smp_wmb();
-       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
 
+       mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
 
        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
+       mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
+       mutex_init(&bdi->cgwb_release_mutex);
 
        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
+       spin_unlock_irq(&cgwb_lock);
 
+       mutex_lock(&bdi->cgwb_release_mutex);
+       spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
+       mutex_unlock(&bdi->cgwb_release_mutex);
 }
 
 /**
index 56e2d9125ea55a57632feb22c64f2c1a76f5ec6e..38c926520c9718b8929a72829931080a3a53502d 100644 (file)
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       bool page_poisoned = PagePoisoned(page);
+       int mapcount;
+
+       /*
+        * If struct page is poisoned don't access Page*() functions as that
+        * leads to recursive loop. Page*() check for poisoned pages, and calls
+        * dump_page() when detected.
+        */
+       if (page_poisoned) {
+               pr_emerg("page:%px is uninitialized and poisoned", page);
+               goto hex_only;
+       }
+
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
-       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+       mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
        pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
 
        pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 
+hex_only:
        print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_alert("page dumped because: %s\n", reason);
 
 #ifdef CONFIG_MEMCG
-       if (page->mem_cgroup)
+       if (!page_poisoned && page->mem_cgroup)
                pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
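PagePoisoned() itself is not part of this hunk; a plausible sketch of it, assuming a poisoned struct page still carries the poison pattern in its flags word:

/* Assumed helper, for context; not shown in this patch. */
static __always_inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}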
index b70d7ba7cc13522c5bab5594b1211679b21b01e7..fc5f98069f4ea5b2906cf45e8997327c99a5b7ce 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
index 1cd7c1a57a144320b7d1729d7caa6ec93351cc54..25346bd9936432c383b0e98218763bc9d3e27de5 100644 (file)
@@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                if (vma_is_dax(vma))
                        return;
                page = pmd_page(_pmd);
+               if (!PageDirty(page) && pmd_dirty(_pmd))
+                       set_page_dirty(page);
                if (!PageReferenced(page) && pmd_young(_pmd))
                        SetPageReferenced(page);
                page_remove_rmap(page, true);
index 3612fbb32e9d5412e8494e4c220fad84e3a4e779..3103099f64fd89dadc9b3acee9744602ca1fcc05 100644 (file)
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void)
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
+               cond_resched();
        }
 }
 
@@ -3166,6 +3167,13 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
        return 0;
 }
 
+/*
+ * When a new function is introduced to vm_operations_struct and added
+ * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
+ * This is because under System V memory model, mappings created via
+ * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
+ * their original vm_ops are overwritten with shm_vm_ops.
+ */
 const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
index f0179c9c04c26084ba3e76c9954eeb25a2319d1d..a787a319211ef9aea4d0bebb84055b377b52dfae 100644 (file)
@@ -15,6 +15,16 @@
 #define INIT_MM_CONTEXT(name)
 #endif
 
+/*
+ * For dynamically allocated mm_structs, there is a dynamically sized cpumask
+ * at the end of the structure, the size of which depends on the maximum CPU
+ * number the system can see. That way we allocate only as much memory for
+ * mm_cpumask() as needed for the hundreds, or thousands of processes that
+ * a system typically runs.
+ *
+ * Since there is only one init_mm in the entire system, keep it simple
+ * and size this cpu_bitmask to NR_CPUS.
+ */
 struct mm_struct init_mm = {
        .mm_rb          = RB_ROOT,
        .pgd            = swapper_pg_dir,
@@ -25,5 +35,6 @@ struct mm_struct init_mm = {
        .arg_lock       =  __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
        .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
        .user_ns        = &init_user_ns,
+       .cpu_bitmap     = { [BITS_TO_LONGS(NR_CPUS)] = 0},
        INIT_MM_CONTEXT(init_mm)
 };
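The allocation-side counterpart the new comment alludes to lives in the fork path; a hedged sketch of the sizing it implies (mm_struct_size is an illustrative name; cpumask_size() scales with the runtime CPU count):

/* Sketch only: a dynamically allocated mm_struct reserves just enough
 * trailing space for mm_cpumask(), based on nr_cpu_ids rather than the
 * compile-time NR_CPUS maximum used for the static init_mm above. */
static inline size_t mm_struct_size(void)
{
	return sizeof(struct mm_struct) + cpumask_size();
}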
index f185455b34065d27efa2b6a90c9dd2c1dfe92ae9..c3bd5209da380d9a51a0fa4515f4fcdeefcad409 100644 (file)
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 int kasan_module_alloc(void *addr, size_t size)
 {
        void *ret;
+       size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;
 
        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
-                       PAGE_SIZE);
+       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+       shadow_size = round_up(scaled_size, PAGE_SIZE);
 
        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;
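A worked example of the rounding error this fixes, assuming 4 KiB pages and KASAN_SHADOW_SCALE_SHIFT == 3 (one shadow byte per 8 bytes of module memory): for size = 32769 (eight pages plus one byte), the old code computed round_up(32769 >> 3, PAGE_SIZE) = round_up(4096, 4096) = 4096 shadow bytes, although ceil(32769 / 8) = 4097 are needed; the new code computes round_up((32769 + 7) >> 3, PAGE_SIZE) = round_up(4097, 4096) = 8192, covering the final partial granule.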
index cc16d70b8333890730d16c08b858631947e38d70..4b5d245fafc17cbde5c2de63ee516e5f30924cf6 100644 (file)
@@ -228,7 +228,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * fail happens.
                 */
-               WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+               WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+                         "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
        }
 
        return __memblock_find_range_top_down(start, end, size, align, nid,
@@ -1225,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
+#if defined(CONFIG_NO_BOOTMEM)
 /**
  * memblock_virt_alloc_internal - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
@@ -1432,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid(
              (u64)max_addr);
        return NULL;
 }
+#endif
 
 /**
  * __memblock_free_early - free boot memory block
index e6f0d5ef320aa65d2b65ceed4b202021a84fd49b..b2173f7e5164e8671d2e9788555d5342ea1e68f5 100644 (file)
@@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
        int nid;
        int i;
 
-       while ((memcg = parent_mem_cgroup(memcg))) {
+       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                for_each_node(nid) {
                        mz = mem_cgroup_nodeinfo(memcg, nid);
                        for (i = 0; i <= DEF_PRIORITY; i++) {
@@ -4037,6 +4037,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
+static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+{
+       if (memcg->id.id > 0) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+       }
+}
+
 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
        VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
@@ -4047,8 +4055,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
        VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
        if (atomic_sub_and_test(n, &memcg->id.ref)) {
-               idr_remove(&mem_cgroup_idr, memcg->id.id);
-               memcg->id.id = 0;
+               mem_cgroup_id_remove(memcg);
 
                /* Memcg ID pins CSS */
                css_put(&memcg->css);
@@ -4185,8 +4192,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
 fail:
-       if (memcg->id.id > 0)
-               idr_remove(&mem_cgroup_idr, memcg->id.id);
+       mem_cgroup_id_remove(memcg);
        __mem_cgroup_free(memcg);
        return NULL;
 }
@@ -4245,6 +4251,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
        return &memcg->css;
 fail:
+       mem_cgroup_id_remove(memcg);
        mem_cgroup_free(memcg);
        return ERR_PTR(-ENOMEM);
 }
index 27069518e3c598fcbd3564a019442e933a6cf038..2bb5e257080e98a48dc284edcfc82accab949ecc 100644 (file)
@@ -326,7 +326,7 @@ SYSCALL_DEFINE2(memfd_create,
                goto err_fd;
        }
        file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
-       file->f_flags |= O_RDWR | O_LARGEFILE;
+       file->f_flags |= O_LARGEFILE;
 
        if (flags & MFD_ALLOW_SEALING) {
                file_seals = memfd_file_seals_ptr(file);
index 7206a634270be3641e2255aa4c9d9eee68daed51..3d0a74ab70f26bd7f18c8fe60ae9d2e22eab5929 100644 (file)
@@ -326,16 +326,20 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 
-/*
- * See the comment near struct mmu_table_batch.
- */
-
 static void tlb_remove_table_smp_sync(void *arg)
 {
-       /* Simply deliver the interrupt */
+       struct mm_struct __maybe_unused *mm = arg;
+       /*
+        * On most architectures this does nothing. Simply delivering the
+        * interrupt is enough to prevent races with software page table
+        * walking like that done in get_user_pages_fast.
+        *
+        * See the comment near struct mmu_table_batch.
+        */
+       tlb_flush_remove_tables_local(mm);
 }
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
 {
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
@@ -344,7 +348,7 @@ static void tlb_remove_table_one(void *table)
         * It is however sufficient for software page-table walkers that rely on
         * IRQ disabling. See the comment near struct mmu_table_batch.
         */
-       smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+       smp_call_function(tlb_remove_table_smp_sync, tlb->mm, 1);
        __tlb_remove_table(table);
 }
 
@@ -365,6 +369,8 @@ void tlb_table_flush(struct mmu_gather *tlb)
 {
        struct mmu_table_batch **batch = &tlb->batch;
 
+       tlb_flush_remove_tables(tlb->mm);
+
        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
@@ -387,7 +393,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
-                       tlb_remove_table_one(table);
+                       tlb_remove_table_one(table, tlb);
                        return;
                }
                (*batch)->nr = 0;
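The tlb_flush_remove_tables*() hooks referenced in these hunks presumably default to no-ops on architectures that need no extra work, along the lines of:

/* Assumed asm-generic fallbacks; an architecture that must kick
 * lazy-TLB CPUs (the x86 case motivating this change) overrides them. */
#ifndef tlb_flush_remove_tables
#define tlb_flush_remove_tables(mm)		do { } while (0)
#endif

#ifndef tlb_flush_remove_tables_local
#define tlb_flush_remove_tables_local(mm)	do { } while (0)
#endif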
@@ -1417,11 +1423,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
-                       if (next - addr != HPAGE_PMD_SIZE) {
-                               VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
-                                   !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+                       if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
-                       } else if (zap_huge_pmd(tlb, vma, pmd, addr))
+                       else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
                }
@@ -4397,6 +4401,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+       if (!maddr)
+               return -ENOMEM;
+
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
index 9ac49ef17b4e1e5128f3db38e0a97bbc067ed1b9..01f1a14facc461c4ca5490adc4060d50887aa053 100644 (file)
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
                /* Create pseudo-vma that contains just the policy */
                memset(&pvma, 0, sizeof(struct vm_area_struct));
+               vma_init(&pvma, NULL);
                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
index d1eb87ef4b1afa101fde9da06d2991644ca49dda..17bbf4d3e24f846b9cf4a60012dfcbfae008ee11 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,12 +182,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return next;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+               struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long retval;
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                goto out;
 
        /* Ok, looks good - let it rip. */
-       if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+       if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
                goto out;
 
 set_brk:
@@ -911,7 +911,7 @@ again:
                        anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
-               kmem_cache_free(vm_area_cachep, next);
+               vm_area_free(next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
 
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        if (file) {
                if (vm_flags & VM_DENYWRITE) {
@@ -1780,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
+       } else {
+               vma_set_anonymous(vma);
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -1832,7 +1832,7 @@ allow_write_and_free_vma:
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
 free_vma:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
@@ -2620,15 +2620,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                        return err;
        }
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new)
                return -ENOMEM;
 
-       /* most fields are the same, copy all, and then fixup */
-       *new = *vma;
-
-       INIT_LIST_HEAD(&new->anon_vma_chain);
-
        if (new_below)
                new->vm_end = addr;
        else {
@@ -2669,7 +2664,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  out_free_mpol:
        mpol_put(vma_policy(new));
  out_free_vma:
-       kmem_cache_free(vm_area_cachep, new);
+       vm_area_free(new);
        return err;
 }
 
@@ -2929,21 +2924,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       unsigned long len;
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
 
-       len = PAGE_ALIGN(request);
-       if (len < request)
-               return -ENOMEM;
-       if (!len)
-               return 0;
-
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
                return -EINVAL;
@@ -2991,14 +2979,13 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
+       vma_set_anonymous(vma);
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
@@ -3015,18 +3002,20 @@ out:
        return 0;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
-       return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       unsigned long len;
        int ret;
        bool populate;
        LIST_HEAD(uf);
 
+       len = PAGE_ALIGN(request);
+       if (len < request)
+               return -ENOMEM;
+       if (!len)
+               return 0;
+
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
 
@@ -3207,16 +3196,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               new_vma = vm_area_dup(vma);
                if (!new_vma)
                        goto out;
-               *new_vma = *vma;
                new_vma->vm_start = addr;
                new_vma->vm_end = addr + len;
                new_vma->vm_pgoff = pgoff;
                if (vma_dup_policy(vma, new_vma))
                        goto out_free_vma;
-               INIT_LIST_HEAD(&new_vma->anon_vma_chain);
                if (anon_vma_clone(new_vma, vma))
                        goto out_free_mempol;
                if (new_vma->vm_file)
@@ -3231,7 +3218,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_mempol:
        mpol_put(vma_policy(new_vma));
 out_free_vma:
-       kmem_cache_free(vm_area_cachep, new_vma);
+       vm_area_free(new_vma);
 out:
        return NULL;
 }
@@ -3355,12 +3342,10 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
@@ -3381,7 +3366,7 @@ static struct vm_area_struct *__install_special_mapping(
        return vma;
 
 out:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ERR_PTR(ret);
 }
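The vm_area_alloc()/vm_area_dup()/vm_area_free() helpers these hunks convert to are introduced elsewhere in the series; a sketch inferred from the open-coded sequences they replace (zeroed allocation, vm_mm assignment, anon_vma_chain initialization), not their exact bodies:

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = mm;
		INIT_LIST_HEAD(&vma->anon_vma_chain);
	}
	return vma;
}

void vm_area_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vm_area_cachep, vma);
}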
 
index 4452d8bd9ae4b84851f433885eac18e050d72dad..9fc9e43335b6be3d2da03f77d625cff341e1b735 100644 (file)
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        put_nommu_region(vma->vm_region);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 }
 
 /*
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
                if (ret < len)
                        memset(base + ret, 0, len - ret);
 
+       } else {
+               vma_set_anonymous(vma);
        }
 
        return 0;
@@ -1204,7 +1206,7 @@ unsigned long do_mmap(struct file *file,
        if (!region)
                goto error_getting_region;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(current->mm);
        if (!vma)
                goto error_getting_vma;
 
@@ -1212,7 +1214,6 @@ unsigned long do_mmap(struct file *file,
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_flags = vm_flags;
        vma->vm_pgoff = pgoff;
 
@@ -1368,7 +1369,7 @@ error:
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ret;
 
 sharing_violation:
@@ -1469,14 +1470,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!region)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new) {
                kmem_cache_free(vm_region_jar, region);
                return -ENOMEM;
        }
 
        /* most fields are the same, copy all, and then fixup */
-       *new = *vma;
        *region = *vma->vm_region;
        new->vm_region = region;
 
index 1521100f1e63b729bba37e21723a312950d688d8..3222193c46c67298eb7e92fdd131deaeaca87aa7 100644 (file)
@@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        free_area_init_core(pgdat);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void)
        if (pgcnt)
                pr_info("Reserved but unavailable: %lld pages", pgcnt);
 }
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
+       zero_resv_unavail();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
-       zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -6939,9 +6939,21 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
        start = (void *)PAGE_ALIGN((unsigned long)start);
        end = (void *)((unsigned long)end & PAGE_MASK);
        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+               struct page *page = virt_to_page(pos);
+               void *direct_map_addr;
+
+               /*
+                * 'direct_map_addr' might be different from 'pos'
+                * because some architectures' virt_to_page()
+                * work with aliases.  Getting the direct map
+                * address ensures that we get a _writeable_
+                * alias for the memset().
+                */
+               direct_map_addr = page_address(page);
                if ((unsigned int)poison <= 0xFF)
-                       memset(pos, poison, PAGE_SIZE);
-               free_reserved_page(virt_to_page(pos));
+                       memset(direct_map_addr, poison, PAGE_SIZE);
+
+               free_reserved_page(page);
        }
 
        if (pages && s)
@@ -7033,9 +7045,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
+       zero_resv_unavail();
        free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-       zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)
index 6db729dc4c5013784e65cc9e6438bef39d22b9a8..eb477809a5c0a534e2977f6fd6c1df74a05bc170 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlbflush.h>
 
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
-               } else if (pte_unused(pteval)) {
+               } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
                        /*
                         * The guest indicated that the page content is of no
                         * interest anymore. Simply discard the pte, vmscan
                         * will take care of the rest.
+                        * A future reference will then fault in a new zero
+                        * page. When userfaultfd is active, we must not drop
+                        * this page though, as its main user (postcopy
+                        * migration) will not expect userfaults on already
+                        * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(page));
                        /* We have to invalidate as we cleared the pte */
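userfaultfd_armed() comes from the newly included linux/userfaultfd_k.h; its likely shape at this point is a simple VMA flags test:

/* Assumed definition, for context: true when the VMA is registered
 * for missing-page and/or write-protect userfaults. */
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
}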
index 2cab8440305531f8ab97f3a56c95b91516bcd2ea..96bcc51fb9ec7ffcbe4df1202bbc23439e6d2f57 100644 (file)
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
 {
        /* Create a pseudo vma that just contains the policy */
        memset(vma, 0, sizeof(*vma));
+       vma_init(vma, NULL);
        /* Bias interleave by inode number to distribute better across nodes */
        vma->vm_pgoff = index + info->vfs_inode.i_ino;
        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
@@ -3896,18 +3897,11 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
-static const struct dentry_operations anon_ops = {
-       .d_dname = simple_dname
-};
-
 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
                                       unsigned long flags, unsigned int i_flags)
 {
-       struct file *res;
        struct inode *inode;
-       struct path path;
-       struct super_block *sb;
-       struct qstr this;
+       struct file *res;
 
        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
@@ -3918,41 +3912,21 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);
 
-       res = ERR_PTR(-ENOMEM);
-       this.name = name;
-       this.len = strlen(name);
-       this.hash = 0; /* will go */
-       sb = mnt->mnt_sb;
-       path.mnt = mntget(mnt);
-       path.dentry = d_alloc_pseudo(sb, &this);
-       if (!path.dentry)
-               goto put_memory;
-       d_set_d_op(path.dentry, &anon_ops);
-
-       res = ERR_PTR(-ENOSPC);
-       inode = shmem_get_inode(sb, NULL, S_IFREG | 0777, 0, flags);
-       if (!inode)
-               goto put_memory;
-
+       inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
+                               flags);
+       if (unlikely(!inode)) {
+               shmem_unacct_size(flags, size);
+               return ERR_PTR(-ENOSPC);
+       }
        inode->i_flags |= i_flags;
-       d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
+       if (!IS_ERR(res))
+               res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+                               &shmem_file_operations);
        if (IS_ERR(res))
-               goto put_path;
-
-       res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
-                 &shmem_file_operations);
-       if (IS_ERR(res))
-               goto put_path;
-
-       return res;
-
-put_memory:
-       shmem_unacct_size(flags, size);
-put_path:
-       path_put(&path);
+               iput(inode);
        return res;
 }
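alloc_file_pseudo() is added elsewhere in this merge; a hedged reconstruction of what it must cover, pieced together from the open-coded sequence removed above (pseudo dentry, instantiation, file allocation, cleanup on error); the real helper may differ in detail:

/* Reconstruction from the replaced code, not the verbatim helper. */
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	struct qstr this = QSTR_INIT(name, strlen(name));
	struct path path;
	struct file *file;

	path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(mnt);
	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, flags, fops);	/* flags handling simplified */
	if (IS_ERR(file))
		path_put(&path);
	return file;
}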
 
index 890b1f04a03a3d46f80fe1b2cfccae1f14b79836..2296caf87bfbd28a626663af04f2054a0cc3c45c 100644 (file)
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
        list_del(&s->list);
 
        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
+#endif
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
 #ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
                sysfs_slab_release(s);
 #else
                slab_kmem_cache_release(s);
index a3b8467c14af642138deaf35fd3ed3f7f87aed93..51258eff417836f6c5a72433a65c016c8391beb2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
 out:
        kobject_put(&s->kobj);
 }
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
        schedule_work(&s->kobj_remove_work);
 }
 
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+       if (slab_state >= FULL)
+               kobject_del(&s->kobj);
+}
+
 void sysfs_slab_release(struct kmem_cache *s)
 {
        if (slab_state >= FULL)
index 75eda9c2b2602fe24b4c431f797c5e0fc563ebda..8ba0870ecddd0fd592d16ee674b060db512b5b37 100644 (file)
@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               preempt_disable();
                queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
                                this_cpu_ptr(&vmstat_work),
                                round_jiffies_relative(sysctl_stat_interval));
-               preempt_enable();
        }
 }
 
index 7d34e69507e305adec0a64b5e272626385f9d651..cd91fd9d96b814d145e378b573dd289fb501e64e 100644 (file)
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                        ret = -ENOMEM;
                        goto reject;
                }
+
+               /* A second zswap_is_full() check after
+                * zswap_shrink() to make sure it's now
+                * under the max_pool_percent
+                */
+               if (zswap_is_full()) {
+                       ret = -ENOMEM;
+                       goto reject;
+               }
        }
 
        /* allocate entry */
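For context, zswap_is_full() (unchanged by this patch) compares the compressed pool's footprint against the configured ceiling, approximately:

/* Approximate shape of the unchanged helper. */
static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}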
index 73a65789271ba9346902dd721b0accd8ce747adc..8ccee3d01822f78184357141ced7a07c3109dc2c 100644 (file)
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 18c5271910dc2c1e1715efcd2b448b7cee6a844b..5c1343195292c8f474ff683c73c8f73aa51843a0 100644 (file)
@@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
        }
 
 free_and_return:
-       v9fs_put_trans(clnt->trans_mod);
+       if (ret)
+               v9fs_put_trans(clnt->trans_mod);
        kfree(tmp_options);
        return ret;
 }
index 13ec0d5415c74486c68f8290689d16d78513e6e9..bdaf53925acd5606fdb953800620bd05cf0f259e 100644 (file)
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS)            += tls/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
 obj-$(CONFIG_NET)              += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
 obj-$(CONFIG_BPFILTER)         += bpfilter/
-endif
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index 55fdba05d7d9daa805d358118852aabb07746e81..9b6bc5abe94680c0a982b9193932f245080f2f85 100644 (file)
@@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = atalk_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = atalk_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = atalk_compat_ioctl,
index 36b3adacc0ddc1bd9a6c5b8dd55cedba4e9bf47b..10462de734eafc00efb9490ddd58cd0bbc83b7c8 100644 (file)
@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
        ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-       refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+       atm_account_tx(atmvcc, skb);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
index 66caa48a27c2307c1b2b43f4a4381f3b34e78485..d795b9c5aea4a4e35021d9db2e10254036df55fe 100644 (file)
@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                memcpy(here, llc_oui, sizeof(llc_oui));
                ((__be16 *) here)[3] = skb->protocol;
        }
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
        entry->vccs->last_use = jiffies;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
        old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
index 1f2af59935db356c003cfa8dd7d1bce388fada53..a7a68e5096288df11af1037297189962dc2fa548 100644 (file)
@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
                goto out;
        }
        pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+       atm_account_tx(vcc, skb);
 
        skb->dev = NULL; /* for paths shared with net_device interfaces */
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
        if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
                kfree_skb(skb);
                error = -EFAULT;
@@ -648,11 +647,16 @@ out:
        return error;
 }
 
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       struct atm_vcc *vcc = ATM_SD(sock);
-       __poll_t mask = 0;
+       struct atm_vcc *vcc;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
+
+       vcc = ATM_SD(sock);
 
        /* exceptional events */
        if (sk->sk_err)
index 526796ad230fc6a2dbdca37f0d4f66f4edf47f17..5850649068bb29b3d688b4c8e29373b4a7f7592d 100644 (file)
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int flags);
 int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
index 5a95fcf6f9b6cc62ced5480910dac7e41f2e7f06..d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b 100644 (file)
@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        ATM_SKB(skb)->vcc = vcc;
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
 
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        if (vcc->send(vcc, skb) < 0) {
                dev->stats.tx_dropped++;
                return;
index 75620c2f261723a915b74df013ac214479ba70c4..24b53c4c39c6a6b5323a1aa79318b2ab2907a332 100644 (file)
@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
                                        sizeof(struct llc_snap_hdr));
        }
 
-       refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+       atm_account_tx(entry->shortcut, skb);
        entry->shortcut->send(entry->shortcut, skb);
        entry->packets_fwded++;
        mpc->in_ops->put(entry);
index 21d9d341a6199255a017437954e4b688f1ba5bfd..d84227d757170739369021949e37f04bdd93db77 100644 (file)
@@ -244,7 +244,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
         * the packet count limit, so...
         */
        if (atm_may_send(pvcc->atmvcc, size) &&
-           atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+           atomic_inc_not_zero(&pvcc->inflight))
                return 1;
 
        /*
@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
                return 1;
        }
 
-       refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+       atm_account_tx(vcc, skb);
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
        ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
index 9f75092fe7785c080b2a32f9c2c8b147056bd488..2cb10af16afcf8eeb925bfe1aab33e839821109a 100644 (file)
@@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      pvc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        vcc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = vcc_compat_ioctl,
index ee10e8d46185173067f459aa5efdf5a77f8f9f06..b3ba44aab0ee6c9425fd278ebf8e2df1590a6d7a 100644 (file)
@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
        struct sock *sk = sk_atm(vcc);
 
        pr_debug("(%d) %d -= %d\n",
-                vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+                vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+       WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
        dev_kfree_skb_any(skb);
        sk->sk_write_space(sk);
 }
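The atm_account_tx() helper the earlier net/atm hunks convert to is introduced in a header by this same change; a sketch inferred from its call sites and from atm_pop_raw() above, which now subtracts ATM_SKB(skb)->acct_truesize rather than skb->truesize:

/* Inferred, not the verbatim helper: record exactly how much was
 * charged to sk_wmem_alloc so the pop path can subtract the same
 * amount even if skb->truesize changes in flight. */
static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
{
	ATM_SKB(skb)->acct_truesize = skb->truesize;
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
}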
index 53f4ad7087b169bccbd8d0b86c7463fd77204a8d..2f91b766ac423c97a0b9c1fd340222e31b17eefa 100644 (file)
@@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       svc_accept,
        .getname =      svc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        svc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = svc_compat_ioctl,
index d1d2442ce573280cbc5b12beba96225bb7445a47..c603d33d54108b9f93f1745534da28d25f12c0ea 100644 (file)
@@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = ax25_accept,
        .getname        = ax25_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = ax25_ioctl,
        .listen         = ax25_listen,
        .shutdown       = ax25_shutdown,
index be09a98838252f4f0c23cec0625930cf896cd0ff..73bf6a93a3cf1141a34657bf1284893199e04db9 100644 (file)
@@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
index ec93337ee2597738e46b87dd72724d5becf3f48e..6baec4e68898c6e992e7522d2ee8c78ce62a1b08 100644 (file)
@@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
index 4229b01ac7b54008e023df0ed6546a6d541498ba..87479c60670ebfbe2ad3df17130f1289d657df7b 100644 (file)
@@ -19,6 +19,7 @@
 #include "debugfs.h"
 #include "main.h"
 
+#include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -343,6 +344,25 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
+ * @hard_iface: hard interface which was renamed
+ */
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+       const char *name = hard_iface->net_dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = hard_iface->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
@@ -413,6 +433,26 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
+ * @dev: net_device which was renamed
+ */
+void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       const char *name = dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = bat_priv->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
  * @dev: netdev struct of the soft interface
index 37b069698b04b369e68e4e8a31c3ac01575b0178..08a592ffbee5203ac4994fc49bf9c187c2e66f8e 100644 (file)
@@ -30,8 +30,10 @@ struct net_device;
 void batadv_debugfs_init(void);
 void batadv_debugfs_destroy(void);
 int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_rename_meshif(struct net_device *dev);
 void batadv_debugfs_del_meshif(struct net_device *dev);
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
 
 #else
@@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
        return 0;
 }
 
+static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+}
+
 static inline void batadv_debugfs_del_meshif(struct net_device *dev)
 {
 }
@@ -59,6 +65,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
        return 0;
 }
 
+static inline
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
 static inline
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
 {
index c405d15befd60bdabf9f50813c3bee446238d539..2f0d42f2f913e74cf10c0c6ce89320434994cac5 100644 (file)
@@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void)
        rtnl_unlock();
 }
 
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+                                      struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv;
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               batadv_sysfs_add_meshif(net_dev);
+               bat_priv = netdev_priv(net_dev);
+               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+               break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_meshif(net_dev);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
 static int batadv_hard_if_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
@@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_priv *bat_priv;
 
-       if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
-               batadv_sysfs_add_meshif(net_dev);
-               bat_priv = netdev_priv(net_dev);
-               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
-               return NOTIFY_DONE;
-       }
+       if (batadv_softif_is_valid(net_dev))
+               return batadv_hard_if_event_softif(event, net_dev);
 
        hard_iface = batadv_hardif_get_by_netdev(net_dev);
        if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
                if (batadv_is_wifi_hardif(hard_iface))
                        hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
                break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_hardif(hard_iface);
+               break;
        default:
                break;
        }
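
Folding the old special case into batadv_hard_if_event_softif() gives soft interfaces their own event switch, so NETDEV_CHANGENAME can be handled for both device classes without further cluttering the main notifier. A compilable model of that dispatch shape; handle_soft, handle_event and the event names are stand-ins:

#include <stdio.h>

enum ev { EV_REGISTER, EV_CHANGENAME };

static int handle_soft(enum ev e)
{
        switch (e) {
        case EV_REGISTER:
                puts("soft: register sysfs + default vlan");
                break;
        case EV_CHANGENAME:
                puts("soft: rename debugfs dir");
                break;
        }
        return 0;
}

static int handle_event(int is_soft, enum ev e)
{
        if (is_soft)
                return handle_soft(e);  /* early return keeps the hard-if path flat */
        puts("hard: existing switch");
        return 0;
}

int main(void)
{
        return handle_event(1, EV_CHANGENAME);
}
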
index 3986551397caa5ffb6ba7338eeb4769c8b8f99fb..12a2b7d21376721d15c6a31f3e794e4270d74b5c 100644 (file)
@@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                ether_addr_copy(common->addr, tt_addr);
                common->vid = vid;
 
-               common->flags = flags;
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
                tt_global_entry->roam_at = 0;
                /* node must store current time in case of roaming. This is
                 * needed to purge this entry out on timeout (if nobody claims
@@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                 * TT_CLIENT_TEMP, therefore they have to be copied in the
                 * client entry
                 */
-               common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
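
Both hunks gate the flag copy on the address type: multicast TT entries never take remote flags, and unicast entries take them only with the sync bits masked out. A small model of that masking; TT_SYNC_MASK's value here is invented, only the shape matches:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TT_SYNC_MASK 0x00F0u            /* illustrative bit layout */

static uint16_t stored_flags(bool is_multicast, uint16_t flags)
{
        if (is_multicast)
                return 0;               /* nothing taken from the message */
        return flags & ~TT_SYNC_MASK;   /* strip the sync-only bits */
}

int main(void)
{
        printf("%#x\n", (unsigned)stored_flags(false, 0x00FF)); /* -> 0xf */
        printf("%#x\n", (unsigned)stored_flags(true, 0x00FF));  /* -> 0 */
        return 0;
}
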
index 510ab4f55df56bc1c356d5130d2dbea4be4744ff..3264e1873219bd40b8c1ccfc2ce6c40d96ca0030 100644 (file)
@@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
+                         poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
@@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(bt_sock_poll_mask);
+EXPORT_SYMBOL(bt_sock_poll);
 
 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
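
This hunk reverts the Bluetooth code from the short-lived ->poll_mask interface back to the classic ->poll, whose contract is that the method registers on the socket's wait queue via poll_wait() before it samples any state; checking readiness first and registering afterwards would race with wakeups. A self-contained model of that ordering, where poll_wait_stub and the types are simplified stand-ins for the kernel's:

#include <stdio.h>

typedef unsigned int poll_t;
struct waitq   { int dummy; };          /* stand-in for wait_queue_head_t */
struct pollctx { int registered; };     /* stand-in for poll_table */

/* In the kernel, poll_wait() queues the caller on the wait queue so
 * that a wakeup arriving after the checks below is not lost. */
static void poll_wait_stub(struct waitq *wq, struct pollctx *pt)
{
        (void)wq;
        pt->registered = 1;
}

static poll_t example_poll(struct waitq *wq, struct pollctx *pt, int readable)
{
        poll_wait_stub(wq, pt);         /* register *before* checking state */
        return readable ? 0x1u : 0;     /* 0x1 plays the role of EPOLLIN */
}

int main(void)
{
        struct waitq wq = {0};
        struct pollctx pt = {0};

        printf("mask=%#x registered=%d\n",
               example_poll(&wq, &pt, 1), pt.registered);
        return 0;
}
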
index d6c0998615388d078c0910bee08784b4fac2f0c0..1506e1632394acf06e9f5873d045bd394e5b3059 100644 (file)
@@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = {
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
index 742a190034e6378a4be886ed730d55936c82ee27..686bdc6b35b03d1fd0965dc0fd76c5edde78c1eb 100644 (file)
@@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = {
        .getname        = l2cap_sock_getname,
        .sendmsg        = l2cap_sock_sendmsg,
        .recvmsg        = l2cap_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 1cf57622473aa70d626e1df5ad867800ab4cfe6e..d606e9212291608ea2e266238c0f65ce18d0c311 100644 (file)
@@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = {
        .setsockopt     = rfcomm_sock_setsockopt,
        .getsockopt     = rfcomm_sock_getsockopt,
        .ioctl          = rfcomm_sock_ioctl,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .socketpair     = sock_no_socketpair,
        .mmap           = sock_no_mmap
 };
index d60dbc61d170864b1393aabb0d7f7965a1e6ad17..413b8ee49feca325dea79e328c11b8ba00afbce3 100644 (file)
@@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = {
        .getname        = sco_sock_getname,
        .sendmsg        = sco_sock_sendmsg,
        .recvmsg        = sco_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 68c3578343b4b4d026e9df40fda98a7850757877..22a78eedf4b1447a8f42cc442615191d66ff1b99 100644 (file)
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        u32 retval, duration;
+       int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        void *data;
        int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        skb_reset_network_header(skb);
 
        if (is_l2)
-               __skb_push(skb, ETH_HLEN);
+               __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        retval = bpf_test_run(prog, skb, repeat, &duration);
-       if (!is_l2)
-               __skb_push(skb, ETH_HLEN);
+       if (!is_l2) {
+               if (skb_headroom(skb) < hh_len) {
+                       int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+                       if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+                               kfree_skb(skb);
+                               return -ENOMEM;
+                       }
+               }
+               memset(__skb_push(skb, hh_len), 0, hh_len);
+       }
+
        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
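
For non-L2 programs the test runner now re-pushes a zeroed Ethernet header after the run, growing the head first if the program consumed the headroom, since pushing into missing headroom corrupts memory. A userspace model of the grow-then-push logic, with struct buf standing in for sk_buff and push_header for the pskb_expand_head()/__skb_push() pair:

#include <stdlib.h>
#include <string.h>

struct buf {
        unsigned char *head, *data;
        size_t len;
};

static int push_header(struct buf *b, size_t hh_len)
{
        size_t headroom = (size_t)(b->data - b->head);

        if (headroom < hh_len) {
                /* model of pskb_expand_head(): reallocate with more room */
                size_t grow = hh_len - headroom;
                size_t off = (size_t)(b->data - b->head);
                unsigned char *n = malloc(off + grow + b->len);

                if (!n)
                        return -1;
                memcpy(n + off + grow, b->data, b->len);
                free(b->head);
                b->head = n;
                b->data = n + off + grow;
        }
        b->data -= hh_len;              /* __skb_push() equivalent */
        memset(b->data, 0, hh_len);     /* header contents are zeroed */
        b->len += hh_len;
        return 0;
}

int main(void)
{
        struct buf b = { .head = malloc(32), .data = NULL, .len = 4 };

        b.data = b.head + 2;            /* only 2 bytes of headroom left */
        memcpy(b.data, "data", 4);
        return push_header(&b, 14);     /* an ETH_HLEN-sized push succeeds */
}
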
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
new file mode 100644 (file)
index 0000000..e97084e
--- /dev/null
@@ -0,0 +1 @@
+bpfilter_umh
index a948b072c28f36451a587a88bcb6f86c32023693..76deb661588322d9cf8ac6bdd73ba63f5d1416fc 100644 (file)
@@ -1,6 +1,5 @@
 menuconfig BPFILTER
        bool "BPF based packet filtering framework (BPFILTER)"
-       default n
        depends on NET && BPF && INET
        help
          This builds experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
+       depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
        default m
        help
          This builds bpfilter kernel module with embedded user mode helper
index e0bbe7583e58dcca5e17136d1b091ff03465b4d2..39c6980b5d9952eed1046f656d8c0a85b4a0d2d6 100644 (file)
@@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
 HOSTLDFLAGS += -static
 endif
 
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
-      cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
-      -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
-      --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
-       $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
 obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
index 09522573f611b01ba5fb4d52125e8264d9147f20..f0fc182d3db77eb311d91f7faef4e8a6f85886b3 100644 (file)
 #include <linux/file.h>
 #include "msgfmt.h"
 
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
 
 static struct umh_info info;
 /* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+       err = fork_usermode_blob(&bpfilter_umh_start,
+                                &bpfilter_umh_end - &bpfilter_umh_start,
+                                &info);
        if (err)
                return err;
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644 (file)
index 0000000..40311d1
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+       .section .init.rodata, "a"
+       .global bpfilter_umh_start
+bpfilter_umh_start:
+       .incbin "net/bpfilter/bpfilter_umh"
+       .global bpfilter_umh_end
+bpfilter_umh_end:
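
The new bpfilter_umh_blob.S replaces the objcopy pipeline in the Makefile: .incbin pastes the user-mode-helper binary into .init.rodata between two global labels, and the C side measures it as bpfilter_umh_end - bpfilter_umh_start. A self-contained demonstration of the label-pair technique, using .ascii instead of .incbin so it runs without an external blob; GCC on an ELF target is assumed:

/* demo.c: build with plain `gcc demo.c` on x86-64 Linux */
__asm__(
"       .section .rodata\n"
"       .globl blob_start\n"
"blob_start:\n"
"       .ascii \"hello\"\n"             /* the kernel uses .incbin here */
"       .globl blob_end\n"
"blob_end:\n"
);

extern const char blob_start[], blob_end[];

#include <stdio.h>

int main(void)
{
        /* end minus start gives the embedded length: prints 5 */
        printf("%ld bytes embedded\n", (long)(blob_end - blob_start));
        return 0;
}
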
index e0adcd123f48a1a4f66028bbf731132ed8e7ff17..711d7156efd8bc94b449d0e8066eedd4fe0d5747 100644 (file)
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
        caifd = caif_get(skb->dev);
 
        WARN_ON(caifd == NULL);
-       if (caifd == NULL)
+       if (!caifd) {
+               rcu_read_unlock();
                return;
+       }
 
        caifd_hold(caifd);
        rcu_read_unlock();
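
The fix above is a missing-unlock bug: the early return left the caller inside an RCU read-side critical section. The usual insurance against this class of bug is a single exit label, sketched here with a pthread mutex standing in for the RCU read lock:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int do_work(void *obj)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (!obj) {
                ret = -1;
                goto out;               /* the unlock below runs on every path */
        }
        /* ... use obj under the lock ... */
out:
        pthread_mutex_unlock(&lock);
        return ret;
}
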
index c7991867d62273f48bb55e88774b573e81f40536..a6fb1b3bcad9b2f3c1c24b2a3496ad21b07c69d9 100644 (file)
@@ -934,11 +934,15 @@ static int caif_release(struct socket *sock)
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static __poll_t caif_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t caif_poll(struct file *file,
+                             struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       __poll_t mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       __poll_t mask = 0;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
@@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
index 9393f25df08d3fce299aaa463efd79244e6527e9..0af8f0db892a3311fb5a1a898ab0bff5696adf00 100644 (file)
@@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = sock_no_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index fd7e2f49ea6a20b79c43bf50c72d2b1e8b48d260..1051eee8258184f33d15a6142ee8b387839c9adc 100644 (file)
@@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index f19bf3dc2bd6ea02cb828a95d0b91322ac8b0004..9938952c5c78f1e72ef13f44517ef054a60205b2 100644 (file)
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
 
 /**
  *     datagram_poll - generic datagram poll
+ *     @file: file struct
  *     @sock: socket
- *     @events to wait for
+ *     @wait: poll table
  *
  *     Datagram poll: Again totally generic. This also handles
  *     sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
  *     and you use a different write policy from sock_writeable()
  *     then please supply your own write_space callback.
  */
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
index 57b7bab5f70bb7c50a8be565cc90a40bc1c2d5d6..559a91271f82d09ae73e7026707e9df20033b361 100644 (file)
@@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
                dev->tx_queue_len = new_len;
                res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
                res = notifier_to_errno(res);
-               if (res) {
-                       netdev_err(dev,
-                                  "refused to change device tx_queue_len\n");
-                       dev->tx_queue_len = orig_len;
-                       return res;
-               }
-               return dev_qdisc_change_tx_queue_len(dev);
+               if (res)
+                       goto err_rollback;
+               res = dev_qdisc_change_tx_queue_len(dev);
+               if (res)
+                       goto err_rollback;
        }
 
        return 0;
+
+err_rollback:
+       netdev_err(dev, "refused to change device tx_queue_len\n");
+       dev->tx_queue_len = orig_len;
+       return res;
 }
 
 /**
@@ -8643,7 +8646,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(net, dev, pat) < 0)
+               err = dev_get_valid_name(net, dev, pat);
+               if (err < 0)
                        goto out;
        }
 
@@ -8655,7 +8659,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_close(dev);
 
        /* And unlink it from device chain */
-       err = -ENODEV;
        unlist_netdevice(dev);
 
        synchronize_net();
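
Both dev.c changes tighten error handling: tx_queue_len updates now roll back through a single err_rollback label whether the notifier or the qdisc resize fails, and dev_change_net_namespace() propagates the real dev_get_valid_name() error instead of a stale -ENODEV. A compilable model of the save/attempt/restore shape; notify_change and its threshold are invented:

#include <stdio.h>

/* Stand-in for the notifier call: rejects absurd values. */
static int notify_change(long v)
{
        return v > 10000 ? -1 : 0;
}

int change_len(long *cur, long new_len)
{
        long orig = *cur;
        int err;

        *cur = new_len;                 /* optimistic update, as in dev.c */
        err = notify_change(new_len);
        if (err)
                goto err_rollback;
        return 0;

err_rollback:
        fprintf(stderr, "refused to change len\n");
        *cur = orig;                    /* restore the previous value */
        return err;
}
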
index a04e1e88bf3ab49340d788589c365aaf45d9d3e2..50537ff961a722e18731b7b9671deb739bfce847 100644 (file)
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
                if (dev->tx_queue_len ^ ifr->ifr_qlen) {
-                       unsigned int orig_len = dev->tx_queue_len;
-
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       err = call_netdevice_notifiers(
-                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-                       err = notifier_to_errno(err);
-                       if (err) {
-                               dev->tx_queue_len = orig_len;
+                       err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+                       if (err)
                                return err;
-                       }
                }
                return 0;
 
index 126ffc5bc630cb412e4bcf1a48869ec6711fda54..f64aa13811eaeedf8f0040bc9f993ad9e1661eca 100644 (file)
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->mark && r->mark != rule->mark)
                        continue;
 
+               if (rule->suppress_ifgroup != -1 &&
+                   r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (rule->suppress_prefixlen != -1 &&
+                   r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                        continue;
 
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                        continue;
 
+               if (rule->proto && r->proto != rule->proto)
+                       continue;
+
                if (fib_rule_port_range_set(&rule->sport_range) &&
                    !fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
@@ -645,6 +656,73 @@ errout:
        return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+                      struct nlattr **tb, struct fib_rule *rule)
+{
+       struct fib_rule *r;
+
+       list_for_each_entry(r, &ops->rules_list, list) {
+               if (r->action != rule->action)
+                       continue;
+
+               if (r->table != rule->table)
+                       continue;
+
+               if (r->pref != rule->pref)
+                       continue;
+
+               if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                       continue;
+
+               if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                       continue;
+
+               if (r->mark != rule->mark)
+                       continue;
+
+               if (r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
+               if (r->mark_mask != rule->mark_mask)
+                       continue;
+
+               if (r->tun_id != rule->tun_id)
+                       continue;
+
+               if (r->fr_net != rule->fr_net)
+                       continue;
+
+               if (r->l3mdev != rule->l3mdev)
+                       continue;
+
+               if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                   !uid_eq(r->uid_range.end, rule->uid_range.end))
+                       continue;
+
+               if (r->ip_proto != rule->ip_proto)
+                       continue;
+
+               if (r->proto != rule->proto)
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->sport_range,
+                                                &rule->sport_range))
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->dport_range,
+                                                &rule->dport_range))
+                       continue;
+
+               if (!ops->compare(r, frh, tb))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
 {
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout;
 
        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-           rule_find(ops, frh, tb, rule, user_priority)) {
+           rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }
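
rule_find() treats unset fields as wildcards, which made NLM_F_EXCL match rules that merely overlapped; the new rule_exists() demands equality on every field before declaring a duplicate. A trimmed model of the exact-match loop, where struct rule carries only a few representative fields:

#include <stdbool.h>
#include <string.h>

struct rule {
        int action, table, pref, mark;
        char iifname[16];
};

/* Every field must match exactly; a lookup that skips unset fields
 * would wrongly report a merely overlapping rule as a duplicate. */
static bool rule_exists_model(const struct rule *rules, int n,
                              const struct rule *r)
{
        for (int i = 0; i < n; i++) {
                const struct rule *c = &rules[i];

                if (c->action != r->action || c->table != r->table ||
                    c->pref != r->pref || c->mark != r->mark)
                        continue;
                if (memcmp(c->iifname, r->iifname, sizeof(c->iifname)))
                        continue;
                return true;            /* all fields equal: a true duplicate */
        }
        return false;
}
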
index 3d9ba7e5965adc4658b379a0cf55ff2f22f4b94d..9dfd145eedcc3367f8356ed9426fdf6124104583 100644 (file)
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
+               bool ldx_off_ok = offset <= S16_MAX;
+
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
-               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
-               *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
-                                     offset);
+               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+                                     size, 2 + endian + (!ldx_off_ok * 2));
+               if (ldx_off_ok) {
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_D, offset);
+               } else {
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+                       *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_TMP, 0);
+               }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
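
The BPF_LDX_MEM displacement is a signed 16-bit field, so an ld_abs offset above S16_MAX can no longer be encoded directly; the converter instead adds the offset into a temporary register and loads with displacement zero, and the preceding jump target grows by the two extra instructions. A model of the encode-or-materialize decision, with illustrative mnemonics:

#include <stdint.h>
#include <stdio.h>

static int emit_load(int64_t off)
{
        if (off <= INT16_MAX) {
                printf("ldx r0, [base %+lld]\n", (long long)off);
                return 1;               /* fits in a single instruction */
        }
        printf("mov tmp, base\n");
        printf("add tmp, %lld\n", (long long)off);  /* fold offset into reg */
        printf("ldx r0, [tmp + 0]\n");
        return 3;                       /* two extra insns; jumps adjust */
}

int main(void)
{
        emit_load(8);
        emit_load(70000);               /* too big for a 16-bit displacement */
        return 0;
}
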
@@ -1702,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
           u32, offset, void *, to, u32, len, u32, start_header)
 {
+       u8 *end = skb_tail_pointer(skb);
+       u8 *net = skb_network_header(skb);
+       u8 *mac = skb_mac_header(skb);
        u8 *ptr;
 
-       if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
+       if (unlikely(offset > 0xffff || len > (end - mac)))
                goto err_clear;
 
        switch (start_header) {
        case BPF_HDR_START_MAC:
-               ptr = skb_mac_header(skb) + offset;
+               ptr = mac + offset;
                break;
        case BPF_HDR_START_NET:
-               ptr = skb_network_header(skb) + offset;
+               ptr = net + offset;
                break;
        default:
                goto err_clear;
        }
 
-       if (likely(ptr >= skb_mac_header(skb) &&
-                  ptr + len <= skb_tail_pointer(skb))) {
+       if (likely(ptr >= mac && ptr + len <= end)) {
                memcpy(to, ptr, len);
                return 0;
        }
@@ -1762,6 +1774,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+static inline int sk_skb_try_make_writable(struct sk_buff *skb,
+                                          unsigned int write_len)
+{
+       int err = __bpf_try_make_writable(skb, write_len);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return err;
+}
+
+BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+       /* Idea is the following: should the needed direct read/write
+        * test fail during runtime, we can pull in more data and redo
+        * again, since implicitly, we invalidate previous checks here.
+        *
+        * Or, since we know how much we need to make read/writeable,
+        * this can be done once at the program beginning for direct
+        * access case. By this we overcome limitations of only current
+        * headroom being accessible.
+        */
+       return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto sk_skb_pull_data_proto = {
+       .func           = sk_skb_pull_data,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
 {
@@ -2779,7 +2822,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 
 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
 {
-       return skb->dev->mtu + skb->dev->hard_header_len;
+       return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+                         SKB_MAX_ALLOC;
 }
 
 static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
@@ -2863,8 +2907,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
        return __skb_trim_rcsum(skb, new_len);
 }
 
-BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
-          u64, flags)
+static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 min_len = __bpf_skb_min_len(skb);
@@ -2900,6 +2944,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
                if (!ret && skb_is_gso(skb))
                        skb_gso_reset(skb);
        }
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
 
        bpf_compute_data_pointers(skb);
        return ret;
@@ -2914,8 +2965,26 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_tail_proto = {
+       .func           = sk_skb_change_tail,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+
+static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 new_len = skb->len + head_room;
@@ -2941,8 +3010,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
                skb_reset_mac_header(skb);
        }
 
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
        bpf_compute_data_pointers(skb);
-       return 0;
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_skb_change_head_proto = {
@@ -2954,6 +3031,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_head_proto = {
+       .func           = sk_skb_change_head,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
 {
        return xdp_data_meta_unsupported(xdp) ? 0 :
@@ -3046,12 +3140,16 @@ static int __bpf_tx_xdp(struct net_device *dev,
                        u32 index)
 {
        struct xdp_frame *xdpf;
-       int sent;
+       int err, sent;
 
        if (!dev->netdev_ops->ndo_xdp_xmit) {
                return -EOPNOTSUPP;
        }
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -3214,20 +3312,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-       unsigned int len;
-
-       if (unlikely(!(fwd->flags & IFF_UP)))
-               return -ENETDOWN;
-
-       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-       if (skb->len > len)
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
@@ -3256,10 +3340,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        }
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-               if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+               struct bpf_dtab_netdev *dst = fwd;
+
+               err = dev_map_generic_redirect(dst, skb, xdp_prog);
+               if (unlikely(err))
                        goto err;
-               skb->dev = fwd;
-               generic_xdp_tx(skb, xdp_prog);
        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                struct xdp_sock *xs = fwd;
 
@@ -3298,7 +3383,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                goto err;
        }
 
-       if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+       err = xdp_ok_fwd_dev(fwd, skb->len);
+       if (unlikely(err))
                goto err;
 
        skb->dev = fwd;
@@ -4086,8 +4172,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
        params->h_vlan_TCI = 0;
        params->h_vlan_proto = 0;
+       params->ifindex = dev->ifindex;
 
-       return dev->ifindex;
+       return 0;
 }
 #endif
 
@@ -4111,7 +4198,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        /* verify forwarding is enabled on this interface */
        in_dev = __in_dev_get_rcu(dev);
        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl4.flowi4_iif = 1;
@@ -4136,7 +4223,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = fib_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
        } else {
@@ -4148,8 +4235,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
        }
 
-       if (err || res.type != RTN_UNICAST)
-               return 0;
+       if (err) {
+               /* map fib lookup errors to RTN_ type */
+               if (err == -EINVAL)
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               if (err == -EHOSTUNREACH)
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               if (err == -EACCES)
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
+
+       if (res.type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (res.fi->fib_nhs > 1)
                fib_select_path(net, &res, &fl4, NULL);
@@ -4157,19 +4256,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        nh = &res.fi->fib_nh[res.nh_sel];
 
        /* do not handle lwt encaps right now */
        if (nh->nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        dev = nh->nh_dev;
-       if (unlikely(!dev))
-               return 0;
-
        if (nh->nh_gw)
                params->ipv4_dst = nh->nh_gw;
 
@@ -4179,10 +4275,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         * rcu_read_lock_bh is not needed here
         */
        neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4203,7 +4299,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        /* link local addresses are never forwarded */
        if (rt6_need_strict(dst) || rt6_need_strict(src))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        dev = dev_get_by_index_rcu(net, params->ifindex);
        if (unlikely(!dev))
@@ -4211,7 +4307,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        idev = __in6_dev_get_safely(dev);
        if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl6.flowi6_iif = 1;
@@ -4238,7 +4334,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = ipv6_stub->fib6_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
        } else {
@@ -4251,11 +4347,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        }
 
        if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+               switch (f6i->fib6_type) {
+               case RTN_BLACKHOLE:
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               case RTN_UNREACHABLE:
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               case RTN_PROHIBIT:
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+               default:
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
+               }
+       }
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT ||
-           f6i->fib6_type != RTN_UNICAST))
-               return 0;
+       if (f6i->fib6_type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
                f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4265,11 +4373,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        if (f6i->fib6_nh.nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        if (f6i->fib6_flags & RTF_GATEWAY)
                *dst = f6i->fib6_nh.nh_gw;
@@ -4283,10 +4391,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         */
        neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
                                      ndisc_hashfn, dst, dev);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4328,7 +4436,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
           struct bpf_fib_lookup *, params, int, plen, u32, flags)
 {
        struct net *net = dev_net(skb->dev);
-       int index = -EAFNOSUPPORT;
+       int rc = -EAFNOSUPPORT;
 
        if (plen < sizeof(*params))
                return -EINVAL;
@@ -4339,25 +4447,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
        switch (params->family) {
 #if IS_ENABLED(CONFIG_INET)
        case AF_INET:
-               index = bpf_ipv4_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv4_fib_lookup(net, params, flags, false);
                break;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               index = bpf_ipv6_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv6_fib_lookup(net, params, flags, false);
                break;
 #endif
        }
 
-       if (index > 0) {
+       if (!rc) {
                struct net_device *dev;
 
-               dev = dev_get_by_index_rcu(net, index);
+               dev = dev_get_by_index_rcu(net, params->ifindex);
                if (!is_skb_forwardable(dev, skb))
-                       index = 0;
+                       rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       return index;
+       return rc;
 }
 
 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
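
bpf_skb_fib_lookup() used to return an ifindex, 0, or a negative errno, which callers could not tell apart from "not forwarded". The reworked helper returns a BPF_FIB_LKUP_RET_* code describing why the lookup stopped and delivers the egress ifindex through the params struct instead. A sketch of the status-plus-out-param contract; the enum values and fib_lookup_model are illustrative, not the uapi definitions:

enum fib_ret {
        FIB_RET_SUCCESS = 0,            /* invented names, uapi differs */
        FIB_RET_NOT_FWDED,
        FIB_RET_FWD_DISABLED,
        FIB_RET_NO_NEIGH,
};

struct fib_params {
        int ifindex;                    /* filled in on success */
};

static enum fib_ret fib_lookup_model(struct fib_params *p, int have_neigh)
{
        if (!have_neigh)
                return FIB_RET_NO_NEIGH;        /* caller learns why it failed */
        p->ifindex = 4;                         /* result travels in the struct */
        return FIB_RET_SUCCESS;
}

int main(void)
{
        struct fib_params p = { 0 };

        return fib_lookup_model(&p, 1);         /* 0 == success */
}
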
@@ -4430,10 +4538,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
        .arg4_type      = ARG_CONST_SIZE
 };
 
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
           const void *, from, u32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_tlvs, *srh_end, *ptr;
@@ -4459,9 +4567,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
 
        memcpy(skb->data + offset, from, len);
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4477,7 +4582,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
           u32, action, void *, param, u32, param_len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        struct ipv6_sr_hdr *srh;
@@ -4525,9 +4629,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
        default:
                return -EINVAL;
        }
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4543,7 +4644,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
           s32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_end, *srh_tlvs, *ptr;
@@ -4587,9 +4687,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
        srh_state->hdrlen += len;
        srh_state->valid = 0;
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4600,6 +4697,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
 };
+#endif /* CONFIG_IPV6_SEG6_BPF */
 
 bool bpf_helper_changes_pkt_data(void *func)
 {
@@ -4608,9 +4706,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_skb_store_bytes ||
            func == bpf_skb_change_proto ||
            func == bpf_skb_change_head ||
+           func == sk_skb_change_head ||
            func == bpf_skb_change_tail ||
+           func == sk_skb_change_tail ||
            func == bpf_skb_adjust_room ||
            func == bpf_skb_pull_data ||
+           func == sk_skb_pull_data ||
            func == bpf_clone_redirect ||
            func == bpf_l3_csum_replace ||
            func == bpf_l4_csum_replace ||
@@ -4618,11 +4719,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_xdp_adjust_meta ||
            func == bpf_msg_pull_data ||
            func == bpf_xdp_adjust_tail ||
-           func == bpf_lwt_push_encap ||
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
            func == bpf_lwt_seg6_store_bytes ||
            func == bpf_lwt_seg6_adjust_srh ||
-           func == bpf_lwt_seg6_action
-           )
+           func == bpf_lwt_seg6_action ||
+#endif
+           func == bpf_lwt_push_encap)
                return true;
 
        return false;
@@ -4862,11 +4964,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        case BPF_FUNC_skb_pull_data:
-               return &bpf_skb_pull_data_proto;
+               return &sk_skb_pull_data_proto;
        case BPF_FUNC_skb_change_tail:
-               return &bpf_skb_change_tail_proto;
+               return &sk_skb_change_tail_proto;
        case BPF_FUNC_skb_change_head:
-               return &bpf_skb_change_head_proto;
+               return &sk_skb_change_head_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_cookie_proto;
        case BPF_FUNC_get_socket_uid:
@@ -4957,12 +5059,14 @@ static const struct bpf_func_proto *
 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
        switch (func_id) {
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        case BPF_FUNC_lwt_seg6_store_bytes:
                return &bpf_lwt_seg6_store_bytes_proto;
        case BPF_FUNC_lwt_seg6_action:
                return &bpf_lwt_seg6_action_proto;
        case BPF_FUNC_lwt_seg6_adjust_srh:
                return &bpf_lwt_seg6_adjust_srh_proto;
+#endif
        default:
                return lwt_out_func_proto(func_id, prog);
        }
index b2b2323bdc84c44afc33304d9d2f6a22738f6523..188d693cb251a05d6483b81bbd8d815e28b77164 100644 (file)
@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
                d->lock = lock;
                spin_lock_bh(lock);
        }
-       if (d->tail)
-               return gnet_stats_copy(d, type, NULL, 0, padattr);
+       if (d->tail) {
+               int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
+
+               /* The initial attribute added in gnet_stats_copy() may be
+                * preceded by a padding attribute, in which case d->tail will
+                * end up pointing at the padding instead of the real attribute.
+                * Fix this so gnet_stats_finish_copy() adjusts the length of
+                * the right attribute.
+                */
+               if (ret == 0 && d->tail->nla_type == padattr)
+                       d->tail = (struct nlattr *)((char *)d->tail +
+                                                   NLA_ALIGN(d->tail->nla_len));
+               return ret;
+       }
 
        return 0;
 }
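
The gen_stats fix steps d->tail over a leading padding attribute by its NLA_ALIGNed length, so the later length fixup patches the real attribute instead of the pad. The pointer arithmetic is the standard netlink TLV walk; struct nla and the macros below mirror, but are not, the uapi definitions:

#include <stdint.h>

struct nla {                            /* mirrors struct nlattr's layout */
        uint16_t nla_len;
        uint16_t nla_type;
};

#define MY_NLA_ALIGNTO    4u
#define MY_NLA_ALIGN(len) (((len) + MY_NLA_ALIGNTO - 1) & ~(MY_NLA_ALIGNTO - 1))

/* The attribute after this one starts at the aligned end of this one,
 * which is exactly how d->tail is advanced past the padding. */
static struct nla *nla_next_model(struct nla *a)
{
        return (struct nla *)((char *)a + MY_NLA_ALIGN(a->nla_len));
}

int main(void)
{
        uint16_t buf[4] = { 5, 1, 0, 0 };       /* nla_len = 5, aligns to 8 */
        struct nla *pad = (struct nla *)buf;

        return nla_next_model(pad) == (struct nla *)((char *)buf + 8) ? 0 : 1;
}
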
index e7e626fb87bb34f89014cb16e58aaaf132c65417..e45098593dc00f6b8dadb5df01d91cf5cda64fc3 100644 (file)
@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
        if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
                return -EINVAL;
 
-       prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
+       prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
        if (!prog->name)
                return -ENOMEM;
 
index 68bf072067442567c66db978078ba161d11df442..43a932cb609b78521c1b30ce73c1206bcab439d7 100644 (file)
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
        struct page *page;
 
        /* Empty recycle ring */
-       while ((page = ptr_ring_consume(&pool->ring))) {
+       while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
index 5ef61222fdef1f305909eeca6ac278bcac88e1b0..e3f743c141b3f7b4684fba1ff7de10b27af35349 100644 (file)
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
                        return err;
        }
 
-       dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-       __dev_notify_flags(dev, old_flags, ~0U);
+       if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+               __dev_notify_flags(dev, old_flags, 0U);
+       } else {
+               dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+               __dev_notify_flags(dev, old_flags, ~0U);
+       }
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
index c642304f178ce0a4e1358d59e45032a39f76fb3f..fb35b62af2724025f743d61de24f9fb7eb9186a8 100644 (file)
@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        n->cloned = 1;
        n->nohdr = 0;
        n->peeked = 0;
+       C(pfmemalloc);
        n->destructor = NULL;
        C(tail);
        C(end);
@@ -3719,6 +3720,7 @@ normal:
                                net_warn_ratelimited(
                                        "skb_segment: too many frags: %u %u\n",
                                        pos, mss);
+                               err = -EINVAL;
                                goto err;
                        }
 
@@ -3752,11 +3754,10 @@ skip_fraglist:
 
 perform_csum_check:
                if (!csum) {
-                       if (skb_has_shared_frag(nskb)) {
-                               err = __skb_linearize(nskb);
-                               if (err)
-                                       goto err;
-                       }
+                       if (skb_has_shared_frag(nskb) &&
+                           __skb_linearize(nskb))
+                               goto err;
+
                        if (!nskb->remcsum_offload)
                                nskb->ip_summed = CHECKSUM_NONE;
                        SKB_GSO_CB(nskb)->csum =
@@ -5276,8 +5277,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                        if (npages >= 1 << order) {
                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
                                                   __GFP_COMP |
-                                                  __GFP_NOWARN |
-                                                  __GFP_NORETRY,
+                                                  __GFP_NOWARN,
                                                   order);
                                if (page)
                                        goto fill_page;
index bcc41829a16d50714bdd3c25c976c0b7296fab84..bc2d7a37297fecfbf3fbddd09ce53931fe0e28af 100644 (file)
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
                pfrag->offset += use;
 
                sge = sg + sg_curr - 1;
-               if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
-                   sg->offset + sg->length == orig_offset) {
-                       sg->length += use;
+               if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+                   sge->offset + sge->length == orig_offset) {
+                       sge->length += use;
                } else {
                        sge = sg + sg_curr;
                        sg_unmark_end(sge);
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot)
 
        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
-                                          prot->slab_flags, NULL);
+                                          SLAB_ACCOUNT | prot->slab_flags,
+                                          NULL);
 
        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab)
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
-                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,
+                                       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+                                       prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);
 
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
+                                                 SLAB_ACCOUNT |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
index 9d1f22072d5d5b887b2a0e0bcc35af936ce38778..6771f1855b961b325356c699f6b3190bb188e0ee 100644 (file)
@@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                rcu_read_lock();
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-               xa->zc_alloc->free(xa->zc_alloc, handle);
+               if (!WARN_ON_ONCE(!xa))
+                       xa->zc_alloc->free(xa->zc_alloc, handle);
                rcu_read_unlock();
        default:
                /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
index 2b75df469220a8a5bf3ffa09aa6616eef0a9002d..842a9c7c73a3f6ef7568e105d38c276547ea29ef 100644 (file)
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        u32 cwnd = hc->tx_cwnd, restart_cwnd,
            iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+       s32 delta = now - hc->tx_lsndtime;
 
        hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
        /* don't reduce cwnd below the initial window (IW) */
        restart_cwnd = min(cwnd, iwnd);
-       cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-       hc->tx_cwnd = max(cwnd, restart_cwnd);
 
+       while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+               cwnd >>= 1;
+       hc->tx_cwnd = max(cwnd, restart_cwnd);
        hc->tx_cwnd_stamp = now;
        hc->tx_cwnd_used  = 0;
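
The old code computed a shift count from the elapsed time and applied it in one go; after an idle period of 32 RTOs or more, shifting a u32 by 32 or more bits is undefined behaviour. The replacement decays cwnd one halving per elapsed RTO and stops at the restart window. A runnable model of the corrected loop:

#include <stdint.h>
#include <stdio.h>

static uint32_t decay_cwnd(uint32_t cwnd, uint32_t restart_cwnd,
                           int32_t delta, int32_t rto)
{
        while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
                cwnd >>= 1;             /* one halving per elapsed RTO */
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
        /* 10 RTOs idle: 64 decays to the floor of 4, with no UB */
        printf("%u\n", decay_cwnd(64, 4, 1000, 100));
        return 0;
}
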
 
index 8b5ba6dffac7ebc88fd21075793dc3db43a74a43..12877a1514e7b8e873cd26529e58f7ebaae99c1a 100644 (file)
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
-       ktime_t now = ktime_get_real();
+       ktime_t now = ktime_get();
        s64 delta = 0;
 
        switch (fbtype) {
@@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
        case CCID3_FBACK_PERIODIC:
                delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
                if (delta <= 0)
-                       DCCP_BUG("delta (%ld) <= 0", (long)delta);
-               else
-                       hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+                       delta = 1;
+               hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
                break;
        default:
                return;
        }
 
-       ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+       ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
                       hc->rx_x_recv, hc->rx_pinv);
 
        hc->rx_tstamp_last_feedback = now;
@@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-       u32 x_recv, p, delta;
+       u32 x_recv, p;
+       s64 delta;
        u64 fval;
 
        if (hc->rx_rtt == 0) {
@@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
                hc->rx_rtt = DCCP_FALLBACK_RTT;
        }
 
-       delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+       delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+       if (delta <= 0)
+               delta = 1;
        x_recv = scaled_div32(hc->rx_bytes_recv, delta);
        if (x_recv == 0) {              /* would also trigger divide-by-zero */
                DCCP_WARN("X_recv==0\n");
index 0ea2ee56ac1bee6948ee4ed37c8172b300a7f9de..f91e3816806baae37e0e0793dcef72e8b291777e 100644 (file)
@@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len);
 void dccp_shutdown(struct sock *sk, int how);
 int inet_dccp_listen(struct socket *sock, int backlog);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 void dccp_req_err(struct sock *sk, u64 seq);
 
index a9e478cd3787c90f3d81e3bc2f71a14f7b11e280..b08feb219b44b67eadf408a33649d8c7ec9db2d0 100644 (file)
@@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = {
        .accept            = inet_accept,
        .getname           = inet_getname,
        /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen            = inet_dccp_listen,
index 17fc4e0166ba89ed435dc65bbdd5951d9018c093..6344f1b18a6a1b30cd2f3c559987a2c9e9546f81 100644 (file)
@@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
index ca21c1c76da013575d5bd0c8b3a4ac42eb2b229b..0d56e36a6db7b77dcdeb9697dd81bf62895e6e4c 100644 (file)
@@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags)
 
 EXPORT_SYMBOL_GPL(dccp_disconnect);
 
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
+/*
+ *     Wait for a DCCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
+ */
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
 {
        __poll_t mask;
        struct sock *sk = sock->sk;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-EXPORT_SYMBOL_GPL(dccp_poll_mask);
+EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
index 9a686d890bfad179c09a182245a96bba5dba21ea..7d6ff983ba2cbbf7915a61ffad57e52f66f3a193 100644 (file)
@@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
 }
 
 
-static __poll_t dn_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wait)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        if (!skb_queue_empty(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
@@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       dn_accept,
        .getname =      dn_getname,
-       .poll_mask =    dn_poll_mask,
+       .poll =         dn_poll,
        .ioctl =        dn_ioctl,
        .listen =       dn_listen,
        .shutdown =     dn_shutdown,
index 40c851693f77e35a1f573fdbf0bcd86adb94cf13..0c9478b91fa5b6c8f6b586ed8ead66c8db538ea7 100644 (file)
@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
                opt++;
                kdebug("options: '%s'", opt);
                do {
+                       int opt_len, opt_nlen;
                        const char *eq;
-                       int opt_len, opt_nlen, opt_vlen, tmp;
+                       char optval[128];
 
                        next_opt = memchr(opt, '#', end - opt) ?: end;
                        opt_len = next_opt - opt;
-                       if (opt_len <= 0 || opt_len > 128) {
+                       if (opt_len <= 0 || opt_len > sizeof(optval)) {
                                pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
                                                    opt_len);
                                return -EINVAL;
                        }
 
-                       eq = memchr(opt, '=', opt_len) ?: end;
-                       opt_nlen = eq - opt;
-                       eq++;
-                       opt_vlen = next_opt - eq; /* will be -1 if no value */
+                       eq = memchr(opt, '=', opt_len);
+                       if (eq) {
+                               opt_nlen = eq - opt;
+                               eq++;
+                               memcpy(optval, eq, next_opt - eq);
+                               optval[next_opt - eq] = '\0';
+                       } else {
+                               opt_nlen = opt_len;
+                               optval[0] = '\0';
+                       }
 
-                       tmp = opt_vlen >= 0 ? opt_vlen : 0;
-                       kdebug("option '%*.*s' val '%*.*s'",
-                              opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+                       kdebug("option '%*.*s' val '%s'",
+                              opt_nlen, opt_nlen, opt, optval);
 
                        /* see if it's an error number representing a DNS error
                         * that's to be recorded as the result in this key */
                        if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
                            memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
                                kdebug("dns error number option");
-                               if (opt_vlen <= 0)
-                                       goto bad_option_value;
 
-                               ret = kstrtoul(eq, 10, &derrno);
+                               ret = kstrtoul(optval, 10, &derrno);
                                if (ret < 0)
                                        goto bad_option_value;
 
index 1e3b6a6d8a40dcf69200ead186a6ab8919e63db6..9864bcd3d317f49f3c676718f5b3c302923cc713 100644 (file)
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->get_mac_eee)
@@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+       if (!netif_running(slave_dev))
+               return 0;
+
        netif_device_detach(slave_dev);
 
        rtnl_lock();
@@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+       if (!netif_running(slave_dev))
+               return 0;
+
        netif_device_attach(slave_dev);
 
        rtnl_lock();
index 275449b0d633586a4befec517ab3a36c5e3ba5a5..3297e7fa99458b13c40609588f187d366cf37411 100644 (file)
@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
        return 0;
 }
 
+static int lowpan_get_iflink(const struct net_device *dev)
+{
+       return lowpan_802154_dev(dev)->wdev->ifindex;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
        .ndo_neigh_construct    = lowpan_neigh_construct,
+       .ndo_get_iflink         = lowpan_get_iflink,
 };
 
 static void lowpan_setup(struct net_device *ldev)
index a0768d2759b8ecb8954dd544561b68f26d0c6510..a60658c85a9ad09b405f2d928e70acf64a9ebc4d 100644 (file)
@@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
@@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
index 15e125558c76e5fa2fe466ab0d64be1d3183ebed..b403499fdabea7367f65c588d957a30f5a6572b5 100644 (file)
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,
-       .poll_mask         = tcp_poll_mask,
+       .poll              = tcp_poll,
        .ioctl             = inet_ioctl,
        .listen            = inet_listen,
        .shutdown          = inet_shutdown,
@@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = udp_poll_mask,
+       .poll              = udp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
 
 /*
  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
  */
 static const struct proto_ops inet_sockraw_ops = {
        .family            = PF_INET,
@@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index b21833651394233bbdb143d765e4408333b13b72..2998b0e47d4b6feb214a30cc56a75f6b01ec3adb 100644 (file)
@@ -292,18 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                return ip_hdr(skb)->daddr;
 
        in_dev = __in_dev_get_rcu(dev);
-       BUG_ON(!in_dev);
 
        net = dev_net(dev);
 
        scope = RT_SCOPE_UNIVERSE;
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+               bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
                struct flowi4 fl4 = {
                        .flowi4_iif = LOOPBACK_IFINDEX,
+                       .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
                        .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
                        .flowi4_scope = scope,
-                       .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+                       .flowi4_mark = vmark ? skb->mark : 0,
                };
                if (!fib_lookup(net, &fl4, &res, 0))
                        return FIB_RES_PREFSRC(net, res);
index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..c9ec1603666bffcfb24597b933a05f53b6d83440 100644 (file)
@@ -448,9 +448,7 @@ next_proto:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6a7d980105f60514c8180e6333f0a4a53912c3d5 100644 (file)
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 85b617b655bc2d602563b1bd174f436554c9d046..75151be21413fb60161087c23aefeb6c31093509 100644 (file)
@@ -1200,13 +1200,13 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        spin_lock_bh(&im->lock);
        if (pmc) {
                im->interface = pmc->interface;
-               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               im->sfmode = pmc->sfmode;
-               if (pmc->sfmode == MCAST_INCLUDE) {
+               if (im->sfmode == MCAST_INCLUDE) {
                        im->tomb = pmc->tomb;
                        im->sources = pmc->sources;
                        for (psf = im->sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->crcount;
+                               psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               } else {
+                       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                }
                in_dev_put(pmc->interface);
                kfree(pmc);
@@ -1288,7 +1288,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 #endif
 }
 
-static void igmp_group_added(struct ip_mc_list *im)
+static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
 {
        struct in_device *in_dev = im->interface;
 #ifdef CONFIG_IP_MULTICAST
@@ -1316,7 +1316,13 @@ static void igmp_group_added(struct ip_mc_list *im)
        }
        /* else, v3 */
 
-       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       /* Based on RFC 3376 5.1, for a newly added INCLUDE-mode (SSM)
+        * group we should not send a filter-mode change record, as the
+        * transition is from IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+
        igmp_ifc_event(in_dev);
 #endif
 }
@@ -1381,8 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
 /*
  *     A socket has joined a multicast group on device dev.
  */
-
-void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+                             unsigned int mode)
 {
        struct ip_mc_list *im;
 #ifdef CONFIG_IP_MULTICAST
@@ -1394,7 +1400,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == addr) {
                        im->users++;
-                       ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
+                       ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
                        goto out;
                }
        }
@@ -1408,8 +1414,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        in_dev_hold(in_dev);
        im->multiaddr = addr;
        /* initial mode is (EX, empty) */
-       im->sfmode = MCAST_EXCLUDE;
-       im->sfcount[MCAST_EXCLUDE] = 1;
+       im->sfmode = mode;
+       im->sfcount[mode] = 1;
        refcount_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
@@ -1426,12 +1432,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im);
 #endif
-       igmp_group_added(im);
+       igmp_group_added(im, mode);
        if (!in_dev->dead)
                ip_rt_multicast_event(in_dev);
 out:
        return;
 }
+
+void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+{
+       __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_inc_group);
 
 static int ip_mc_check_iphdr(struct sk_buff *skb)
@@ -1688,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -1751,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -2130,8 +2141,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
 
 /* Join a multicast group
  */
-
-int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
+                             unsigned int mode)
 {
        __be32 addr = imr->imr_multiaddr.s_addr;
        struct ip_mc_socklist *iml, *i;
@@ -2172,15 +2183,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
        memcpy(&iml->multi, imr, sizeof(*imr));
        iml->next_rcu = inet->mc_list;
        iml->sflist = NULL;
-       iml->sfmode = MCAST_EXCLUDE;
+       iml->sfmode = mode;
        rcu_assign_pointer(inet->mc_list, iml);
-       ip_mc_inc_group(in_dev, addr);
+       __ip_mc_inc_group(in_dev, addr, mode);
        err = 0;
 done:
        return err;
 }
+
+/* Join ASM (Any-Source Multicast) group
+ */
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+{
+       return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_join_group);
 
+/* Join SSM (Source-Specific Multicast) group
+ */
+int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                        unsigned int mode)
+{
+       return __ip_mc_join_group(sk, imr, mode);
+}
+
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
                           struct in_device *in_dev)
 {
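For reference, the userspace-visible effect of the IGMP hunks above, as I read them: a source-specific join now starts life in (INCLUDE, {source}) mode instead of briefly passing through EXCLUDE, so no spurious filter-mode change record goes on the wire. A minimal sketch using the standard sockets API, with placeholder group and source addresses:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Join group 232.1.1.1 restricted to source 192.0.2.1. */
    static int join_ssm(int fd)
    {
            struct ip_mreq_source mreqs;

            memset(&mreqs, 0, sizeof(mreqs));
            inet_pton(AF_INET, "232.1.1.1", &mreqs.imr_multiaddr);
            inet_pton(AF_INET, "192.0.2.1", &mreqs.imr_sourceaddr);
            mreqs.imr_interface.s_addr = htonl(INADDR_ANY);

            return setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                              &mreqs, sizeof(mreqs));
    }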
index c9e35b81d0931df8429a33e8d03e719b87da0747..0d70608cc2e18bfa0df35f331e12fbe9b45b168b 100644 (file)
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
 
 void inet_frags_exit_net(struct netns_frags *nf)
 {
-       nf->low_thresh = 0; /* prevent creation of new frags */
+       nf->high_thresh = 0; /* prevent creation of new frags */
 
        rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
 }
@@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 {
        struct inet_frag_queue *q;
 
-       if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
-               return NULL;
-
        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;
@@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
        struct inet_frag_queue *fq;
 
+       if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+               return NULL;
+
        rcu_read_lock();
 
        fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
index 31ff46daae974645dfe73c97e6e507a0ad62dd4b..3647167c8fa313f9eb7a5c5ad34cb0cb7a7aea5e 100644 (file)
@@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score += 4;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 8e9528ebaa8e1af91172466cc161a83301a217ca..d14d741fb05e571d6d0b03248da77fd993debdee 100644 (file)
@@ -383,11 +383,16 @@ found:
                int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
 
                if (i < next->len) {
+                       int delta = -next->truesize;
+
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
+                       delta += next->truesize;
+                       if (delta)
+                               add_frag_mem_limit(qp->q.net, delta);
                        next->ip_defrag_offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
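The delta bookkeeping added above exists because pskb_pull() can reallocate and shrink the skb, changing its truesize. A hedged sketch of the idiom in isolation, with illustrative names: snapshot truesize before the call and charge only the difference to the frag memory accounting.

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <net/inet_frag.h>

    static int eat_overlap(struct inet_frag_queue *q,
                           struct sk_buff *next, int i)
    {
            int delta = -next->truesize;

            if (!pskb_pull(next, i))
                    return -ENOMEM;

            /* truesize may have changed; account only the delta. */
            delta += next->truesize;
            if (delta)
                    add_frag_mem_limit(q->net, delta);
            return 0;
    }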
index af5a830ff6ad320ae68066ab86476962db978f79..0e3edd25f881f1ad09201be0930734523721ebfc 100644 (file)
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
@@ -1145,7 +1147,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
 
-       cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+       cork->gso_size = sk->sk_type == SOCK_DGRAM &&
+                        sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
index fc32fdbeefa61c18da5b9330d4da73ca6db992bd..c0fe5ad996f238091f5b9585adb586a571f653f0 100644 (file)
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
        const struct iphdr *iph = ip_hdr(skb);
-       __be16 *ports = (__be16 *)skb_transport_header(skb);
+       __be16 *ports;
+       int end;
 
-       if (skb_transport_offset(skb) + 4 > (int)skb->len)
+       end = skb_transport_offset(skb) + 4;
+       if (end > 0 && !pskb_may_pull(skb, end))
                return;
 
        /* All current transport protocols have the port numbers in the
         * first four bytes of the transport header and this function is
         * written with this assumption in mind.
         */
+       ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = iph->daddr;
@@ -984,7 +987,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
                        mreq.imr_address.s_addr = mreqs.imr_interface;
                        mreq.imr_ifindex = 0;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        omode = MCAST_INCLUDE;
@@ -1061,7 +1064,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr = psin->sin_addr;
                        mreq.imr_address.s_addr = 0;
                        mreq.imr_ifindex = greqs.gsr_interface;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        greqs.gsr_interface = mreq.imr_ifindex;
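The ip_cmsg_recv_dstaddr() hunk above (and the matching IPv6 change later in this series) replaces a bare length check with pskb_may_pull(). A hedged sketch of the idiom: validate first, and only then take a pointer into the linear area, since pskb_may_pull() may reallocate skb->head and invalidate earlier pointers.

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    static int read_transport_ports(struct sk_buff *skb, __be16 ports[2])
    {
            int end = skb_transport_offset(skb) + 4;

            if (end <= 0 || !pskb_may_pull(skb, end))
                    return -EINVAL;

            /* Take the header pointer only after the pull succeeded. */
            memcpy(ports, skb_transport_header(skb), sizeof(__be16) * 2);
            return 0;
    }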
index ca0dad90803a92bdcbb1e199554985ad4626fada..e77872c93c206693f4bcfdde98a044c6e7cfb780 100644 (file)
@@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
                .checkentry = icmp_checkentry,
                .proto      = IPPROTO_ICMP,
                .family     = NFPROTO_IPV4,
+               .me         = THIS_MODULE,
        },
 };
 
index 805e83ec3ad9347abc6ce778f296319746772f1c..16471410496592f52ac7927d218a44341f139339 100644 (file)
@@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                            iph->saddr, laddr ? laddr : iph->daddr,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
 
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, ip_hdrlen(skb),
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
                                                    ip_hdrlen(skb) +
-                                                     __tcp_hdrlen(tcph),
+                                                     __tcp_hdrlen(hp),
                                                    saddr, sport,
                                                    daddr, dport,
                                                    in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
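The nf_tproxy fix above stops trusting a caller-supplied header pointer and instead snapshots the TCP header itself. A minimal sketch of the skb_header_pointer() idiom it adopts, as an illustrative wrapper: the header is copied into an on-stack buffer when it is not linear, and callers must check for NULL exactly as the hunk does.

    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/ip.h>

    /* Returns a valid pointer to the TCP header, or NULL if the packet
     * is too short; buf is the caller's on-stack struct tcphdr.
     */
    static const struct tcphdr *tcp_header_safe(struct sk_buff *skb,
                                                struct tcphdr *buf)
    {
            return skb_header_pointer(skb, ip_hdrlen(skb),
                                      sizeof(*buf), buf);
    }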
index d06247ba08b2667b1049329e8921af9388545c54..5fa335fd385254def583b9a5100fbe7b9ce94cd6 100644 (file)
@@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        if (write && ret == 0) {
                low = make_kgid(user_ns, urange[0]);
                high = make_kgid(user_ns, urange[1]);
-               if (!gid_valid(low) || !gid_valid(high) ||
-                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+               if (!gid_valid(low) || !gid_valid(high))
+                       return -EINVAL;
+               if (urange[1] < urange[0] || gid_lt(high, low)) {
                        low = make_kgid(&init_user_ns, 1);
                        high = make_kgid(&init_user_ns, 0);
                }
@@ -265,8 +266,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
            ipv4.sysctl_tcp_fastopen);
        struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
-       int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+       __le32 key[4];
+       int ret, i;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
@@ -275,11 +277,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt)
-               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+               memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        else
-               memset(user_key, 0, sizeof(user_key));
+               memset(key, 0, sizeof(key));
        rcu_read_unlock();
 
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               user_key[i] = le32_to_cpu(key[i]);
+
        snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
                user_key[0], user_key[1], user_key[2], user_key[3]);
        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +295,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
-               tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+               for (i = 0; i < ARRAY_SIZE(user_key); i++)
+                       key[i] = cpu_to_le32(user_key[i]);
+
+               tcp_fastopen_reset_cipher(net, NULL, key,
                                          TCP_FASTOPEN_KEY_LENGTH);
        }
 
 bad_key:
        pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-              user_key[0], user_key[1], user_key[2], user_key[3],
+               user_key[0], user_key[1], user_key[2], user_key[3],
               (char *)tbl.data, ret);
        kfree(tbl.data);
        return ret;
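As I read the fastopen hunk above, the stored key bytes are now treated as an array of __le32, so the sysctl text is identical on big- and little-endian hosts and a key written on one can be read back on the other. A hedged sketch of the display half of that round trip, with an illustrative name:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void tfo_key_to_text(const __le32 key[4], char *buf, size_t len)
    {
            u32 user_key[4];
            int i;

            /* Endian-stable: the text is the same on LE and BE hosts. */
            for (i = 0; i < 4; i++)
                    user_key[i] = le32_to_cpu(key[i]);

            snprintf(buf, len, "%08x-%08x-%08x-%08x",
                     user_key[0], user_key[1], user_key[2], user_key[3]);
    }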
index 141acd92e58aeddeb9a0ba1eaacf3bd520a836a3..4491faf83f4f93cf4384f7b192ffe3022567cc0a 100644 (file)
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ *     Wait for a TCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+       __poll_t mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
-       __poll_t mask = 0;
        int state;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
+       /* Socket is not locked. We are protected from async events
+        * by poll logic and correct handling of state changes
+        * made by other threads is impossible in any case.
+        */
+
+       mask = 0;
+
        /*
         * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -1987,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
                                 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
                                 flags))
                                break;
@@ -2002,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                                goto found_fin_ok;
                        WARN(!(flags & MSG_PEEK),
-                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
                             *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
@@ -2551,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
+       tp->copied_seq = tp->rcv_nxt;
+       tp->urg_data = 0;
        tcp_write_queue_purge(sk);
        tcp_fastopen_active_disable_ofo_check(sk);
        skb_rbtree_purge(&tp->out_of_order_queue);
@@ -2810,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_REPAIR:
                if (!tcp_can_repair_sock(sk))
                        err = -EPERM;
-               else if (val == 1) {
+               else if (val == TCP_REPAIR_ON) {
                        tp->repair = 1;
                        sk->sk_reuse = SK_FORCE_REUSE;
                        tp->repair_queue = TCP_NO_QUEUE;
-               } else if (val == 0) {
+               } else if (val == TCP_REPAIR_OFF) {
                        tp->repair = 0;
                        sk->sk_reuse = SK_NO_REUSE;
                        tcp_send_window_probe(sk);
+               } else if (val == TCP_REPAIR_OFF_NO_WP) {
+                       tp->repair = 0;
+                       sk->sk_reuse = SK_NO_REUSE;
                } else
                        err = -EINVAL;
 
@@ -3709,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err)
                        struct request_sock *req = inet_reqsk(sk);
 
                        local_bh_disable();
-                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
-                                                         req);
+                       inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                        local_bh_enable();
                        return 0;
                }
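Userspace view of the new TCP_REPAIR state introduced above: TCP_REPAIR_OFF_NO_WP leaves repair mode without the window probe that plain TCP_REPAIR_OFF sends, which matters when the peer has not been restored yet. The numeric value comes from this series' uapi header, which is not shown here, so the fallback define below is an assumption.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_REPAIR
    #define TCP_REPAIR 19
    #endif
    #ifndef TCP_REPAIR_OFF_NO_WP
    #define TCP_REPAIR_OFF_NO_WP -1     /* assumed uapi value from this series */
    #endif

    static int repair_off_quiet(int fd)
    {
            int val = TCP_REPAIR_OFF_NO_WP;

            return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR,
                              &val, sizeof(val));
    }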
index 58e2f479ffb4d523b4ccfbb859bdd186a55ab83d..4bfff3c87e8e2de2c9af77ae1d5bb157a28f2207 100644 (file)
@@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
        /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
        cwnd = (cwnd + 1) & ~1U;
 
+       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
+       if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
+               cwnd += 2;
+
        return cwnd;
 }
 
index 5f5e5936760e65739859d0d8d9717b3204482a43..8b637f9f23a232a137f4a7f2d685a599cc063c1b 100644 (file)
@@ -55,7 +55,6 @@ struct dctcp {
        u32 dctcp_alpha;
        u32 next_seq;
        u32 ce_state;
-       u32 delayed_ack_reserved;
        u32 loss_cwnd;
 };
 
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
-               ca->delayed_ack_reserved = 0;
                ca->loss_cwnd = 0;
                ca->ce_state = 0;
 
@@ -131,23 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=0 to CE=1 and delayed
-        * ACK has not sent yet.
-        */
-       if (!ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=0. */
-               tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (!ca->ce_state) {
+               /* State has changed from CE=0 to CE=1, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=1 to CE=0 and delayed
-        * ACK has not sent yet.
-        */
-       if (ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=1. */
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (ca->ce_state) {
+               /* State has changed from CE=1 to CE=0, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -248,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
        }
 }
 
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
-       struct dctcp *ca = inet_csk_ca(sk);
-
-       switch (ev) {
-       case CA_EVENT_DELAYED_ACK:
-               if (!ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 1;
-               break;
-       case CA_EVENT_NON_DELAYED_ACK:
-               if (ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 0;
-               break;
-       default:
-               /* Don't care for the rest. */
-               break;
-       }
-}
-
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 {
        switch (ev) {
@@ -276,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ce_state_1_to_0(sk);
                break;
-       case CA_EVENT_DELAYED_ACK:
-       case CA_EVENT_NON_DELAYED_ACK:
-               dctcp_update_ack_reserved(sk, ev);
-               break;
        default:
                /* Don't care for the rest. */
                break;
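A hedged condensation of the two CE-transition handlers above (the helper name is mine, not the patch's): on any CE flip, first flush a pending delayed ACK using the previous rcv_nxt so it still carries the old CE state, then force the next real ACK out immediately. Both __tcp_send_ack() and tcp_enter_quickack_mode() are exported elsewhere in this series.

    #include <net/inet_connection_sock.h>
    #include <net/tcp.h>

    static void dctcp_ce_flip(struct sock *sk, u32 prior_rcv_nxt)
    {
            /* If an ACK was delayed, send it first with the old
             * rcv_nxt so it reflects the prior CE state.
             */
            if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
                    __tcp_send_ack(sk, prior_rcv_nxt);
            tcp_enter_quickack_mode(sk, 1);
    }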
index 355d3dffd021ccad0f30891994289d916f7d276c..f9dcb29be12da9c637db0c2382eaf357c052d804 100644 (file)
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
                icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
        icsk->icsk_ack.pingpong = 0;
        icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -245,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 
 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-       if (tcp_hdr(skb)->cwr)
+       if (tcp_hdr(skb)->cwr) {
                tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+
+               /* If the sender is telling us it has entered CWR, then its
+                * cwnd may be very low (even just 1 packet), so we should ACK
+                * immediately.
+                */
+               tcp_enter_quickack_mode((struct sock *)tp, 2);
+       }
 }
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
@@ -265,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
@@ -273,7 +281,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3181,6 +3189,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+
+                       /* If any of the cumulatively ACKed segments was
+                        * retransmitted, the non-SACK case cannot confirm
+                        * that progress came from the original transmission:
+                        * without TCPCB_SACKED_ACKED bits it stays ambiguous,
+                        * even if some packets were never retransmitted.
+                        */
+                       if (flag & FLAG_RETRANS_DATA_ACKED)
+                               flag &= ~FLAG_ORIG_SACK_ACKED;
                } else {
                        int delta;
 
@@ -4348,6 +4365,23 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                            struct sk_buff *to,
+                            struct sk_buff *from,
+                            bool *fragstolen)
+{
+       bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+       /* In case tcp_drop() is called later, update to->gso_segs */
+       if (res) {
+               u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                              max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+               skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+       }
+       return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
        sk_drops_add(sk, skb);
@@ -4471,8 +4505,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        /* In the typical case, we are adding an skb to the end of the list.
         * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
         */
-       if (tcp_try_coalesce(sk, tp->ooo_last_skb,
-                            skb, &fragstolen)) {
+       if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                                skb, &fragstolen)) {
 coalesce_done:
                tcp_grow_window(sk, skb);
                kfree_skb_partial(skb, fragstolen);
@@ -4500,7 +4534,7 @@ coalesce_done:
                                /* All the bits are present. Drop. */
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb);
+                               tcp_drop(sk, skb);
                                skb = NULL;
                                tcp_dsack_set(sk, seq, end_seq);
                                goto add_sack;
@@ -4519,11 +4553,11 @@ coalesce_done:
                                                 TCP_SKB_CB(skb1)->end_seq);
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb1);
+                               tcp_drop(sk, skb1);
                                goto merge_right;
                        }
-               } else if (tcp_try_coalesce(sk, skb1,
-                                           skb, &fragstolen)) {
+               } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                               skb, &fragstolen)) {
                        goto coalesce_done;
                }
                p = &parent->rb_right;
@@ -4892,6 +4926,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 range_truesize, sum_tiny = 0;
        struct sk_buff *skb, *head;
        u32 start, end;
 
@@ -4903,6 +4938,7 @@ new_range:
        }
        start = TCP_SKB_CB(skb)->seq;
        end = TCP_SKB_CB(skb)->end_seq;
+       range_truesize = skb->truesize;
 
        for (head = skb;;) {
                skb = skb_rb_next(skb);
@@ -4913,11 +4949,20 @@ new_range:
                if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-                                    head, skb, start, end);
+                       /* Do not attempt collapsing tiny skbs */
+                       if (range_truesize != head->truesize ||
+                           end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+                               tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+                                            head, skb, start, end);
+                       } else {
+                               sum_tiny += range_truesize;
+                               if (sum_tiny > sk->sk_rcvbuf >> 3)
+                                       return;
+                       }
                        goto new_range;
                }
 
+               range_truesize += skb->truesize;
                if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
                        start = TCP_SKB_CB(skb)->seq;
                if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4932,6 +4977,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *     freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4939,20 +4985,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct rb_node *node, *prev;
+       int goal;
 
        if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
                return false;
 
        NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+       goal = sk->sk_rcvbuf >> 3;
        node = &tp->ooo_last_skb->rbnode;
        do {
                prev = rb_prev(node);
                rb_erase(node, &tp->out_of_order_queue);
+               goal -= rb_to_skb(node)->truesize;
                tcp_drop(sk, rb_to_skb(node));
-               sk_mem_reclaim(sk);
-               if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-                   !tcp_under_memory_pressure(sk))
-                       break;
+               if (!prev || goal <= 0) {
+                       sk_mem_reclaim(sk);
+                       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                           !tcp_under_memory_pressure(sk))
+                               break;
+                       goal = sk->sk_rcvbuf >> 3;
+               }
                node = prev;
        } while (node);
        tp->ooo_last_skb = rb_to_skb(prev);
@@ -4987,6 +5039,9 @@ static int tcp_prune_queue(struct sock *sk)
        else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+               return 0;
+
        tcp_collapse_ofo_queue(sk);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                tcp_collapse(sk, &sk->sk_receive_queue, NULL,
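Worked arithmetic for the new pruning goal above: "goal = sk->sk_rcvbuf >> 3" is one eighth, the 12.5% the comment names, so each reclaim round frees at least that much truesize before re-checking memory pressure. This bounds both the work per wakeup and an attacker's ability to keep the queue populated with tiny skbs. A trivial standalone illustration, assuming a 4 MiB receive buffer:

    #include <stdio.h>

    int main(void)
    {
            int sk_rcvbuf = 4 << 20;        /* assume a 4 MiB receive buffer */
            int goal = sk_rcvbuf >> 3;      /* 12.5% => 524288 bytes (512 KiB) */

            printf("prune goal per round: %d bytes\n", goal);
            return 0;
    }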
index bea17f1e8302585d70c1e0108ae1c33d149230d8..3b2711e33e4c7c06ed8caec20cf0241f36068f54 100644 (file)
@@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
-               tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
-               if (tp->write_seq == 0)
-                       tp->write_seq = 1;
-               tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
-               tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               /* In case of repair and re-using TIME-WAIT sockets we still
+                * want to be sure that it is safe as above but honor the
+                * sequence numbers and time stamps set as part of the repair
+                * process.
+                *
+                * Without this check re-using a TIME-WAIT socket with TCP
+                * repair would accumulate a -1 on the repair assigned
+                * sequence number. The first time it is reused the sequence
+                * is -1, the second time -2, etc. This fixes that issue
+                * without appearing to create any others.
+                */
+               if (likely(!tp->repair)) {
+                       tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+                       if (tp->write_seq == 0)
+                               tp->write_seq = 1;
+                       tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
+                       tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               }
                sock_hold(sktw);
                return 1;
        }
index 8e08b409c71e1f8e69422f1756d48b5bc55411c3..c4172c1fb198d4bcd1fcaace00308b3f86b0a843 100644 (file)
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                                     u32 rcv_nxt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
+
+       if (unlikely(rcv_nxt != tp->rcv_nxt))
+               return;  /* Special ACK sent by DCTCP to reflect ECN */
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                           gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                             int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        th->source              = inet->inet_sport;
        th->dest                = inet->inet_dport;
        th->seq                 = htonl(tcb->seq);
-       th->ack_seq             = htonl(tp->rcv_nxt);
+       th->ack_seq             = htonl(rcv_nxt);
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->tcp_flags);
 
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        icsk->icsk_af_ops->send_check(sk, skb);
 
        if (likely(tcb->tcp_flags & TCPHDR_ACK))
-               tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+               tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
        if (skb->len != tcp_header_size) {
                tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                           gfp_t gfp_mask)
+{
+       return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                                 tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3523,8 +3534,6 @@ void tcp_send_delayed_ack(struct sock *sk)
        int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
-       tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
        if (ato > TCP_DELACK_MIN) {
                const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ / 2;
@@ -3573,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
        struct sk_buff *buff;
 
@@ -3581,8 +3590,6 @@ void tcp_send_ack(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return;
 
-       tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
        /* We are not putting this on the write queue, so
         * tcp_transmit_skb() will set the ownership to this
         * sock.
@@ -3608,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
        skb_set_tcp_pure_ack(buff);
 
        /* Send it off, this clears delayed acks for us. */
-       tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+       __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+       __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
index 9bb27df4dac5ec5f133b15e972f384bdc1d165b1..24e116ddae79ce0696e3f63290385ae15e28ac18 100644 (file)
@@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     udp_poll - wait for a UDP event.
  *     @file - file struct
  *     @sock - socket
- *     @events - events to wait for
+ *     @wait - poll table
  *
 *     This is the same as datagram_poll(), except for the special case
 *     of blocking sockets. If an application is using a blocking fd and
 *     a packet with a checksum error is in the queue, poll() could say
 *     the socket is readable but the read would then block. Add special
 *     case code to work around these arguably broken applications.
  */
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
-       if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+       if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(EPOLLIN | EPOLLRDNORM);
 
        return mask;
 
 }
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
 
 int udp_abort(struct sock *sk, int err)
 {
index 92dc9e5a7ff3d0a7509bfa2a66e9189c8341a5fa..69c54540d5b4f2664b78b56468b09e3c1f6ac888 100644 (file)
@@ -394,7 +394,7 @@ unflush:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
        return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);
index 0eff75525da101e4fce2798626a317366f94623f..b3885ca22d6fb7aa6165c2773ae02d9885099d8f 100644 (file)
@@ -108,6 +108,7 @@ config IPV6_MIP6
 config IPV6_ILA
        tristate "IPv6: Identifier Locator Addressing (ILA)"
        depends on NETFILTER
+       select DST_CACHE
        select LWTUNNEL
        ---help---
          Support for IPv6 Identifier Locator Addressing (ILA).
index c134286d6a4179516709570ad534d1ae26fd0bce..f66a1cae3366fe7b176c176027c2c7b9b39ec278 100644 (file)
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                        continue;
                if ((rt->fib6_flags & noflags) != 0)
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
@@ -4528,6 +4529,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                               unsigned long expires, u32 flags)
 {
        struct fib6_info *f6i;
+       u32 prio;
 
        f6i = addrconf_get_prefix_route(&ifp->addr,
                                        ifp->prefix_len,
@@ -4536,13 +4538,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
        if (!f6i)
                return -ENOENT;
 
-       if (f6i->fib6_metric != ifp->rt_priority) {
+       prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+       if (f6i->fib6_metric != prio) {
+               /* delete old one */
+               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
                /* add new one */
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
-               /* delete old one */
-               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
        } else {
                if (!expires)
                        fib6_clean_expires(f6i);
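The "prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;" line above uses the GNU C conditional with an omitted middle operand: "a ?: b" yields a if a is nonzero and b otherwise, evaluating a only once (unlike "a ? a : b"). Spelled out as a small standalone example:

    #include <assert.h>

    static unsigned int prio_or_default(unsigned int prio, unsigned int dflt)
    {
            return prio ? prio : dflt;      /* equivalent to prio ?: dflt */
    }

    int main(void)
    {
            assert(prio_or_default(0, 1024) == 1024);
            assert(prio_or_default(100, 1024) == 100);
            return 0;
    }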
index 74f2a261e8df4dc78a3baddb31609cdc70ba6035..9ed0eae91758f8506b4f6ca0fe3a9c2dc3fe1323 100644 (file)
@@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = inet_accept,               /* ok           */
        .getname           = inet6_getname,
-       .poll_mask         = tcp_poll_mask,             /* ok           */
+       .poll              = tcp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = inet_listen,               /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
@@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = udp_poll_mask,             /* ok           */
+       .poll              = udp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 1323b9679cf718d0023bf5880dcd60fb8602d9db..1c0bb9fb76e61fa7d12317190ebac38847530858 100644 (file)
@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
 {
        struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
 
-       txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
-                                        hop, hop ? ipv6_optlen(hop) : 0);
+       txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
        txopt_put(old);
        if (IS_ERR(txopts))
                return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
        if (IS_ERR(new))
                return PTR_ERR(new);
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        kfree(new);
 
@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
        if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
                return; /* Nothing to do */
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        if (!IS_ERR(txopts)) {
                txopts = xchg(&req_inet->ipv6_opt, txopts);
index 2ee08b6a86a4881210f5a0c81206a64a562e5a56..1a1f876f8e282d636a13ae1f48c3f90a9f754bbc 100644 (file)
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
        }
        if (np->rxopt.bits.rxorigdstaddr) {
                struct sockaddr_in6 sin6;
-               __be16 *ports = (__be16 *) skb_transport_header(skb);
+               __be16 *ports;
+               int end;
 
-               if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+               end = skb_transport_offset(skb) + 4;
+               if (end <= 0 || pskb_may_pull(skb, end)) {
                        /* All current transport protocols have the port numbers in the
                         * first four bytes of the transport header and this function is
                         * written with this assumption in mind.
                         */
+                       ports = (__be16 *)skb_transport_header(skb);
 
                        sin6.sin6_family = AF_INET6;
                        sin6.sin6_addr = ipv6_hdr(skb)->daddr;
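
The old bounds check compared skb_transport_offset() + 4 against skb->len,
which says nothing about whether the four port bytes are actually in the
linear header area. pskb_may_pull() guarantees that, and because it may
reallocate skb->head, the ports pointer must be derived only after the
pull succeeds; condensed:

        end = skb_transport_offset(skb) + 4;
        if (pskb_may_pull(skb, end)) {
                /* Safe only now: pskb_may_pull() may have moved skb->head,
                 * invalidating any previously cached header pointer.
                 */
                ports = (__be16 *)skb_transport_header(skb);
        }
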
index 97513f35bcc584b276d83686ad249c6718ea4178..88a7579c23bdb3ae432a126bb0781493bc8d60b7 100644 (file)
@@ -669,8 +669,10 @@ skip_cow:
 
        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
+               kfree(tmp);
                goto out;
+       }
 
        skb->ip_summed = CHECKSUM_NONE;
 
index 5bc2bf3733abd387de8d21932c95ef32eea30d80..20291c2036fcdcd23ccdc2f5b5ae2a1734b2833d 100644 (file)
@@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 }
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
-static int ipv6_renew_option(void *ohdr,
-                            struct ipv6_opt_hdr __user *newopt, int newoptlen,
-                            int inherit,
-                            struct ipv6_opt_hdr **hdr,
-                            char **p)
+static void ipv6_renew_option(int renewtype,
+                             struct ipv6_opt_hdr **dest,
+                             struct ipv6_opt_hdr *old,
+                             struct ipv6_opt_hdr *new,
+                             int newtype, char **p)
 {
-       if (inherit) {
-               if (ohdr) {
-                       memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       *p += CMSG_ALIGN(ipv6_optlen(*hdr));
-               }
-       } else {
-               if (newopt) {
-                       if (copy_from_user(*p, newopt, newoptlen))
-                               return -EFAULT;
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       if (ipv6_optlen(*hdr) > newoptlen)
-                               return -EINVAL;
-                       *p += CMSG_ALIGN(newoptlen);
-               }
-       }
-       return 0;
+       struct ipv6_opt_hdr *src;
+
+       src = (renewtype == newtype ? new : old);
+       if (!src)
+               return;
+
+       memcpy(*p, src, ipv6_optlen(src));
+       *dest = (struct ipv6_opt_hdr *)*p;
+       *p += CMSG_ALIGN(ipv6_optlen(*dest));
 }
 
 /**
@@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr,
  */
 struct ipv6_txoptions *
 ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-                  int newtype,
-                  struct ipv6_opt_hdr __user *newopt, int newoptlen)
+                  int newtype, struct ipv6_opt_hdr *newopt)
 {
        int tot_len = 0;
        char *p;
        struct ipv6_txoptions *opt2;
-       int err;
 
        if (opt) {
                if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
        }
 
-       if (newopt && newoptlen)
-               tot_len += CMSG_ALIGN(newoptlen);
+       if (newopt)
+               tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
 
        if (!tot_len)
                return NULL;
@@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);
 
-       err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
-                               newtype != IPV6_HOPOPTS,
-                               &opt2->hopopt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDRDSTOPTS,
-                               &opt2->dst0opt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDR,
-                               (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_DSTOPTS,
-                               &opt2->dst1opt, &p);
-       if (err)
-               goto out;
+       ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
+                         (opt ? opt->hopopt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
+                         (opt ? opt->dst0opt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDR,
+                         (struct ipv6_opt_hdr **)&opt2->srcrt,
+                         (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
+                         (opt ? opt->dst1opt : NULL),
+                         newopt, newtype, &p);
 
        opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
                          (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
 
        return opt2;
-out:
-       sock_kfree_s(sk, opt2, opt2->tot_len);
-       return ERR_PTR(err);
-}
-
-/**
- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
- *
- * @sk: sock from which to allocate memory
- * @opt: original options
- * @newtype: option type to replace in @opt
- * @newopt: new option of type @newtype to replace (kernel-mem)
- * @newoptlen: length of @newopt
- *
- * See ipv6_renew_options().  The difference is that @newopt is
- * kernel memory, rather than user memory.
- */
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
-                       int newtype, struct ipv6_opt_hdr *newopt,
-                       int newoptlen)
-{
-       struct ipv6_txoptions *ret_val;
-       const mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
-       ret_val = ipv6_renew_options(sk, opt, newtype,
-                                    (struct ipv6_opt_hdr __user *)newopt,
-                                    newoptlen);
-       set_fs(old_fs);
-       return ret_val;
 }
 
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
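
ipv6_renew_options() now works entirely on kernel memory: it rebuilds the
sticky-option block by copying each of the four extension headers from
either the old set or, when the type matches @newtype, from @newopt
(a NULL @newopt deletes that header). An illustrative call, replacing just
the routing header while the other options are carried over (old_opt and
new_rthdr are placeholder names):

        struct ipv6_txoptions *opt2;

        opt2 = ipv6_renew_options(sk, old_opt, IPV6_RTHDR, new_rthdr);
        if (IS_ERR(opt2))
                return PTR_ERR(opt2);   /* old_opt is untouched on error */
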
index be491bf6ab6e9ff4d1a9d84bc78c4582f4fe8e01..ef2505aefc159d9a5a3fc544179bc5d086377dd2 100644 (file)
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
 
        /* for local traffic to local address, skb dev is the loopback
         * device. Check if there is a dst attached to the skb and if so
-        * get the real device index.
+        * get the real device index. The same is needed for replies to a
+        * link-local address on a device enslaved to an L3 master device
         */
-       if (unlikely(iif == LOOPBACK_IFINDEX)) {
+       if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
                const struct rt6_info *rt6 = skb_rt6_info(skb);
 
                if (rt6)
index 2febe26de6a150155e269da0c38e5cb1122aca8d..595ad408dba09184eb814eee1870e04c17b79f77 100644 (file)
@@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score++;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 39d1d487eca25faceacbc3619fc6c4c38088d62a..d212738e9d100d4e3270f9188466da6b8a3d186c 100644 (file)
@@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
        return f6i;
 }
 
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
 {
+       struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
        struct rt6_exception_bucket *bucket;
        struct dst_metrics *m;
 
@@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
 
        kfree(f6i);
 }
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
 
 static struct fib6_node *node_alloc(struct net *net)
 {
@@ -934,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 {
        struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
                                    lockdep_is_held(&rt->fib6_table->tb6_lock));
-       enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
-       struct fib6_info *iter = NULL, *match = NULL;
+       struct fib6_info *iter = NULL;
        struct fib6_info __rcu **ins;
+       struct fib6_info __rcu **fallback_ins = NULL;
        int replace = (info->nlh &&
                       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
-       int append = (info->nlh &&
-                      (info->nlh->nlmsg_flags & NLM_F_APPEND));
        int add = (!info->nlh ||
                   (info->nlh->nlmsg_flags & NLM_F_CREATE));
        int found = 0;
+       bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
        u16 nlflags = NLM_F_EXCL;
        int err;
 
-       if (append)
+       if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
                nlflags |= NLM_F_APPEND;
 
        ins = &fn->leaf;
@@ -969,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 
                        nlflags &= ~NLM_F_EXCL;
                        if (replace) {
-                               found++;
-                               break;
+                               if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                       found++;
+                                       break;
+                               }
+                               if (rt_can_ecmp)
+                                       fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
                        }
 
                        if (rt6_duplicate_nexthop(iter, rt)) {
@@ -985,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
                                return -EEXIST;
                        }
-
-                       /* first route that matches */
-                       if (!match)
-                               match = iter;
+                       /* If we have the same destination and the same metric,
+                        * but not the same gateway, then the route we are
+                        * trying to add is a sibling of this route; increment
+                        * our counter of siblings, and later we will add our
+                        * route to the list.
+                        * Only static routes (which don't have the flag
+                        * RTF_EXPIRES) are used for ECMPv6.
+                        *
+                        * To avoid a long list, we only add siblings if the
+                        * route has a gateway.
+                        */
+                       if (rt_can_ecmp &&
+                           rt6_qualify_for_ecmp(iter))
+                               rt->fib6_nsiblings++;
                }
 
                if (iter->fib6_metric > rt->fib6_metric)
                        break;
 
+next_iter:
                ins = &iter->fib6_next;
        }
 
+       if (fallback_ins && !found) {
+               /* No ECMP-able route found, replace first non-ECMP one */
+               ins = fallback_ins;
+               iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+               found++;
+       }
+
        /* Reset round-robin state, if necessary */
        if (ins == &fn->leaf)
                fn->rr_ptr = NULL;
 
        /* Link this route to others same route. */
-       if (append && match) {
+       if (rt->fib6_nsiblings) {
+               unsigned int fib6_nsiblings;
                struct fib6_info *sibling, *temp_sibling;
 
-               if (rt->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append a REJECT route");
-                       return -EINVAL;
-               } else if (match->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append to a REJECT route");
-                       return -EINVAL;
+               /* Find the first route that has the same metric */
+               sibling = leaf;
+               while (sibling) {
+                       if (sibling->fib6_metric == rt->fib6_metric &&
+                           rt6_qualify_for_ecmp(sibling)) {
+                               list_add_tail(&rt->fib6_siblings,
+                                             &sibling->fib6_siblings);
+                               break;
+                       }
+                       sibling = rcu_dereference_protected(sibling->fib6_next,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
                }
-               event = FIB_EVENT_ENTRY_APPEND;
-               rt->fib6_nsiblings = match->fib6_nsiblings;
-               list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
-               match->fib6_nsiblings++;
-
                /* For each sibling in the list, increment the counter of
                 * siblings. BUG() if the counters do not match; the list of siblings
                 * is broken!
                 */
+               fib6_nsiblings = 0;
                list_for_each_entry_safe(sibling, temp_sibling,
-                                        &match->fib6_siblings, fib6_siblings) {
+                                        &rt->fib6_siblings, fib6_siblings) {
                        sibling->fib6_nsiblings++;
-                       BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings);
+                       BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
+                       fib6_nsiblings++;
                }
-
-               rt6_multipath_rebalance(match);
+               BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
+               rt6_multipath_rebalance(temp_sibling);
        }
 
        /*
@@ -1042,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 add:
                nlflags |= NLM_F_CREATE;
 
-               err = call_fib6_entry_notifiers(info->nl_net, event, rt,
-                                               extack);
+               err = call_fib6_entry_notifiers(info->nl_net,
+                                               FIB_EVENT_ENTRY_ADD,
+                                               rt, extack);
                if (err)
                        return err;
 
@@ -1061,7 +1087,7 @@ add:
                }
 
        } else {
-               struct fib6_info *tmp;
+               int nsiblings;
 
                if (!found) {
                        if (add)
@@ -1076,57 +1102,48 @@ add:
                if (err)
                        return err;
 
-               /* if route being replaced has siblings, set tmp to
-                * last one, otherwise tmp is current route. this is
-                * used to set fib6_next for new route
-                */
-               if (iter->fib6_nsiblings)
-                       tmp = list_last_entry(&iter->fib6_siblings,
-                                             struct fib6_info,
-                                             fib6_siblings);
-               else
-                       tmp = iter;
-
-               /* insert new route */
                atomic_inc(&rt->fib6_ref);
                rcu_assign_pointer(rt->fib6_node, fn);
-               rt->fib6_next = tmp->fib6_next;
+               rt->fib6_next = iter->fib6_next;
                rcu_assign_pointer(*ins, rt);
-
                if (!info->skip_notify)
                        inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               nsiblings = iter->fib6_nsiblings;
+               iter->fib6_node = NULL;
+               fib6_purge_rt(iter, fn, info->nl_net);
+               if (rcu_access_pointer(fn->rr_ptr) == iter)
+                       fn->rr_ptr = NULL;
+               fib6_info_release(iter);
 
-               /* delete old route */
-               rt = iter;
-
-               if (rt->fib6_nsiblings) {
-                       struct fib6_info *tmp;
-
+               if (nsiblings) {
                        /* Replacing an ECMP route, remove all siblings */
-                       list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings,
-                                                fib6_siblings) {
-                               iter->fib6_node = NULL;
-                               fib6_purge_rt(iter, fn, info->nl_net);
-                               if (rcu_access_pointer(fn->rr_ptr) == iter)
-                                       fn->rr_ptr = NULL;
-                               fib6_info_release(iter);
-
-                               rt->fib6_nsiblings--;
-                               info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                       ins = &rt->fib6_next;
+                       iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+                       while (iter) {
+                               if (iter->fib6_metric > rt->fib6_metric)
+                                       break;
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->fib6_next;
+                                       iter->fib6_node = NULL;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
+                                       if (rcu_access_pointer(fn->rr_ptr) == iter)
+                                               fn->rr_ptr = NULL;
+                                       fib6_info_release(iter);
+                                       nsiblings--;
+                                       info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                               } else {
+                                       ins = &iter->fib6_next;
+                               }
+                               iter = rcu_dereference_protected(*ins,
+                                       lockdep_is_held(&rt->fib6_table->tb6_lock));
                        }
+                       WARN_ON(nsiblings != 0);
                }
-
-               WARN_ON(rt->fib6_nsiblings != 0);
-
-               rt->fib6_node = NULL;
-               fib6_purge_rt(rt, fn, info->nl_net);
-               if (rcu_access_pointer(fn->rr_ptr) == rt)
-                       fn->rr_ptr = NULL;
-               fib6_info_release(rt);
        }
 
        return 0;
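
The rewritten replace path hinges on rt6_qualify_for_ecmp(): NLM_F_REPLACE
only swaps a route for one of the same kind, and siblings are linked by
walking the leaf list rather than via the removed NLM_F_APPEND
bookkeeping. For orientation, the qualifier is essentially a flags test
along these lines (the exact flag set may differ between versions):

        static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
        {
                /* only static gateway routes participate in ECMP */
                return (f6i->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF |
                                           RTF_DYNAMIC)) == RTF_GATEWAY;
        }
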
index c8cf2fdbb13b88cc1bf6b494a75407cdc16977eb..cd2cfb04e5d82010a5eb1800a53fc8007479c6f9 100644 (file)
@@ -927,7 +927,6 @@ tx_err:
 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
 {
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ip6_tnl *t = netdev_priv(dev);
        struct dst_entry *dst = skb_dst(skb);
        struct net_device_stats *stats;
@@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                        goto tx_err;
                }
        } else {
+               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
index 021e5aef6ba31b7a9face6eb363a6409761385a7..3168847c30d1d4a0021b7effc8653befce1d4d22 100644 (file)
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
@@ -1219,7 +1221,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        if (mtu < IPV6_MIN_MTU)
                return -EINVAL;
        cork->base.fragsize = mtu;
-       cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+       cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
+                             sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
 
        if (dst_allfrag(xfrm_dst_path(&rt->dst)))
                cork->base.flags |= IPCORK_ALLFRAG;
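
Two independent fixes here: forwarded fragments now keep the flow hash via
skb_copy_hash(), and the GSO size hint is honoured only on UDP sockets,
since other SOCK_DGRAM users (raw, ICMPv6 ping) must not inherit it. The
hint originates from the UDP_SEGMENT socket option; the userspace side
looks roughly like:

        int gso_size = 1400;    /* payload bytes per segment */

        setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
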
index 00e138a44cbba2e7c03cb003f5823b42e18a923a..1cc9650af9fbc0bf3f49d2bd4258f9b3d1e5790c 100644 (file)
@@ -1133,12 +1133,8 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (skb->protocol == htons(ETH_P_IPV6)) {
-               if (mtu < IPV6_MIN_MTU)
-                       mtu = IPV6_MIN_MTU;
-       } else if (mtu < 576) {
-               mtu = 576;
-       }
+       mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+                      IPV6_MIN_MTU : IPV4_MIN_MTU);
 
        skb_dst_update_pmtu(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
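
IPV4_MIN_MTU is the RFC 791 floor of 68 bytes; the previous literal 576 is
the minimum IPv4 *reassembly* buffer size, not the minimum link MTU, so
the old code over-clamped tunnelled IPv4 packets. The constants involved:

        #define IPV4_MIN_MTU    68      /* RFC 791 */
        #define IPV6_MIN_MTU    1280    /* RFC 8200 */
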
index b7f28deddaeaf8aa35f23da3256f10440d84af85..c72ae3a4fe0978b314a0603be59a6668705ce75c 100644 (file)
@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-       skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
-       skb_dst_set(skb, dst);
-       skb->dev = skb_dst(skb)->dev;
-
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                                  htonl(mtu));
                }
 
-               return -EMSGSIZE;
+               err = -EMSGSIZE;
+               goto tx_err_dst_release;
        }
 
+       skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+       skb_dst_set(skb, dst);
+       skb->dev = skb_dst(skb)->dev;
+
        err = dst_output(t->net, skb->sk, skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
index 4d780c7f013060732dda2db760d7ba0474c812e3..568ca4187cd101e745988ee262f79431ef8d28cc 100644 (file)
@@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        case IPV6_DSTOPTS:
        {
                struct ipv6_txoptions *opt;
+               struct ipv6_opt_hdr *new = NULL;
+
+               /* hop-by-hop / destination options are privileged options */
+               retv = -EPERM;
+               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+                       break;
 
                /* remove any sticky options header with a zero option
                 * length, per RFC3542.
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                else if (optlen < sizeof(struct ipv6_opt_hdr) ||
                         optlen & 0x7 || optlen > 8 * 255)
                        goto e_inval;
-
-               /* hop-by-hop / destination options are privileged option */
-               retv = -EPERM;
-               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
-                       break;
+               else {
+                       new = memdup_user(optval, optlen);
+                       if (IS_ERR(new)) {
+                               retv = PTR_ERR(new);
+                               break;
+                       }
+                       if (unlikely(ipv6_optlen(new) > optlen)) {
+                               kfree(new);
+                               goto e_inval;
+                       }
+               }
 
                opt = rcu_dereference_protected(np->opt,
                                                lockdep_sock_is_held(sk));
-               opt = ipv6_renew_options(sk, opt, optname,
-                                        (struct ipv6_opt_hdr __user *)optval,
-                                        optlen);
+               opt = ipv6_renew_options(sk, opt, optname, new);
+               kfree(new);
                if (IS_ERR(opt)) {
                        retv = PTR_ERR(opt);
                        break;
@@ -718,8 +729,9 @@ done:
                        struct sockaddr_in6 *psin6;
 
                        psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
-                       retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
-                                                &psin6->sin6_addr);
+                       retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
+                                                    &psin6->sin6_addr,
+                                                    MCAST_INCLUDE);
                        /* prior join w/ different source is ok */
                        if (retv && retv != -EADDRINUSE)
                                break;
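
The setsockopt() path now validates and copies the user buffer up front:
memdup_user() makes a single kernel copy, and because an IPv6 option
header encodes its own length, anything claiming to be longer than what
was copied is rejected before ipv6_renew_options() ever parses it:

        new = memdup_user(optval, optlen);
        if (IS_ERR(new))
                return PTR_ERR(new);
        /* ipv6_optlen() trusts new->hdrlen; cap it by the copied size */
        if (ipv6_optlen(new) > optlen) {
                kfree(new);
                return -EINVAL;
        }
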
index 975021df7c1cf2eae6897e3dd57ea20998f4ea90..f60f310785fd6989ac37dfd05a35c60e58b7986a 100644 (file)
@@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int delta);
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
                            struct inet6_dev *idev);
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode);
 
 #define MLD_QRV_DEFAULT                2
 /* RFC3810, 9.2. Query Interval */
@@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
        return iv > 0 ? iv : 1;
 }
 
-int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
+                              const struct in6_addr *addr, unsigned int mode)
 {
        struct net_device *dev = NULL;
        struct ipv6_mc_socklist *mc_lst;
@@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        }
 
        mc_lst->ifindex = dev->ifindex;
-       mc_lst->sfmode = MCAST_EXCLUDE;
+       mc_lst->sfmode = mode;
        rwlock_init(&mc_lst->sflock);
        mc_lst->sflist = NULL;
 
@@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
         *      now add/increase the group membership on the device
         */
 
-       err = ipv6_dev_mc_inc(dev, addr);
+       err = __ipv6_dev_mc_inc(dev, addr, mode);
 
        if (err) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        return 0;
 }
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ipv6_sock_mc_join);
 
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
+}
+
 /*
  *     socket leave on multicast group
  */
@@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
        return rv;
 }
 
-static void igmp6_group_added(struct ifmcaddr6 *mc)
+static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
 {
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];
@@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
        }
        /* else v2 */
 
-       mc->mca_crcount = mc->idev->mc_qrv;
+       /* Based on RFC 3810, 6.1: for a newly added INCLUDE SSM
+        * membership we should not send a filter-mode change record,
+        * as the transition is from IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               mc->mca_crcount = mc->idev->mc_qrv;
+
        mld_ifc_event(mc->idev);
 }
 
@@ -770,13 +790,13 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        spin_lock_bh(&im->mca_lock);
        if (pmc) {
                im->idev = pmc->idev;
-               im->mca_crcount = idev->mc_qrv;
-               im->mca_sfmode = pmc->mca_sfmode;
-               if (pmc->mca_sfmode == MCAST_INCLUDE) {
+               if (im->mca_sfmode == MCAST_INCLUDE) {
                        im->mca_tomb = pmc->mca_tomb;
                        im->mca_sources = pmc->mca_sources;
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->mca_crcount;
+                               psf->sf_crcount = idev->mc_qrv;
+               } else {
+                       im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
                kfree(pmc);
@@ -831,7 +851,8 @@ static void ma_put(struct ifmcaddr6 *mc)
 }
 
 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
-                                  const struct in6_addr *addr)
+                                  const struct in6_addr *addr,
+                                  unsigned int mode)
 {
        struct ifmcaddr6 *mc;
 
@@ -849,9 +870,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        refcount_set(&mc->mca_refcnt, 1);
        spin_lock_init(&mc->mca_lock);
 
-       /* initial mode is (EX, empty) */
-       mc->mca_sfmode = MCAST_EXCLUDE;
-       mc->mca_sfcount[MCAST_EXCLUDE] = 1;
+       mc->mca_sfmode = mode;
+       mc->mca_sfcount[mode] = 1;
 
        if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
            IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -863,7 +883,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 /*
  *     device multicast group inc (add if not found)
  */
-int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode)
 {
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
@@ -887,14 +908,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
                if (ipv6_addr_equal(&mc->mca_addr, addr)) {
                        mc->mca_users++;
                        write_unlock_bh(&idev->lock);
-                       ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
-                               NULL, 0);
+                       ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
                        in6_dev_put(idev);
                        return 0;
                }
        }
 
-       mc = mca_alloc(idev, addr);
+       mc = mca_alloc(idev, addr, mode);
        if (!mc) {
                write_unlock_bh(&idev->lock);
                in6_dev_put(idev);
@@ -911,11 +931,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        write_unlock_bh(&idev->lock);
 
        mld_del_delrec(idev, mc);
-       igmp6_group_added(mc);
+       igmp6_group_added(mc, mode);
        ma_put(mc);
        return 0;
 }
 
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+{
+       return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
+}
+
 /*
  *     device multicast group del
  */
@@ -1751,7 +1776,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 
                psf_next = psf->sf_next;
 
-               if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
+               if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
                        psf_prev = psf;
                        continue;
                }
@@ -2066,7 +2091,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_CHANGE_TO_EXCLUDE;
                else
-                       type = MLD2_CHANGE_TO_INCLUDE;
+                       type = MLD2_ALLOW_NEW_SOURCES;
                skb = add_grec(skb, pmc, type, 0, 0, 1);
                spin_unlock_bh(&pmc->mca_lock);
        }
@@ -2082,7 +2107,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
                mld_send_initial_cr(idev);
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
 }
 
@@ -2094,7 +2120,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
        if (idev->mc_dad_count) {
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2452,7 +2479,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
        if (idev->mc_ifc_count) {
                idev->mc_ifc_count--;
                if (idev->mc_ifc_count)
-                       mld_ifc_start_timer(idev, idev->mc_maxdelay);
+                       mld_ifc_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2543,7 +2571,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
        ipv6_mc_reset(idev);
        for (i = idev->mc_list; i; i = i->next) {
                mld_del_delrec(idev, i);
-               igmp6_group_added(i);
+               igmp6_group_added(i, i->mca_sfmode);
        }
        read_unlock_bh(&idev->lock);
 }
index e640d2f3c55cf00568ba195a5f667a6da616ca47..0ec273997d1dc6eff71f62c66bbe214e369ab8f9 100644 (file)
@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
                        return;
                }
        }
-       if (ndopts.nd_opts_nonce)
+       if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
                memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
 
        inc = ipv6_addr_is_multicast(daddr);
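
Neighbour-discovery option lengths count 8-octet units, so requiring
nd_opt_len == 1 pins the nonce option (RFC 7527) to exactly 8 bytes:
2 bytes of type/length followed by the 6-byte nonce that the memcpy()
reads. Without the check a malformed, shorter option could be over-read.
Schematically:

        /* RFC 7527 nonce option, nd_opt_len in 8-octet units:
         *   +------+-------+--------------------------+
         *   | type | len=1 |     nonce (6 octets)     |
         *   +------+-------+--------------------------+
         */
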
index 7eab959734bc736cc103551fb50bce84f9aeaec7..daf2e9e9193d19f8f89890f96ca0439d8d55c1c6 100644 (file)
@@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
                .checkentry = icmp6_checkentry,
                .proto      = IPPROTO_ICMPV6,
                .family     = NFPROTO_IPV6,
+               .me         = THIS_MODULE,
        },
 };
 
index 5e0332014c1738999e680c1853829f384e880284..e4d9e6976d3c295e68b13c0ceecd5fa76db4fbc1 100644 (file)
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        if (hdr == NULL)
                goto err_reg;
 
-       net->nf_frag.sysctl.frags_hdr = hdr;
+       net->nf_frag_frags_hdr = hdr;
        return 0;
 
 err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
        struct ctl_table *table;
 
-       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       table = net->nf_frag_frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag_frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
 }
@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
            fq->q.meat == fq->q.len &&
            nf_ct_frag6_reasm(fq, skb, dev))
                ret = 0;
+       else
+               skb_dst_drop(skb);
 
 out_unlock:
        spin_unlock_bh(&fq->q.lock);
index bf1d6c421e3bd0d5524559d507eb14ce9874496f..5dfd33af64515518a2f94b13a62a8ae4dce846da 100644 (file)
@@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto,
+               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
                                            &iph->saddr,
                                            nf_tproxy_laddr6(skb, laddr, &iph->daddr),
                                            hp->source,
@@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
 EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
@@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, thoff,
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
-                                                  thoff + __tcp_hdrlen(tcph),
+                                                  thoff + __tcp_hdrlen(hp),
                                                   saddr, sport,
                                                   daddr, ntohs(dport),
                                                   in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
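
Rather than trusting a header pointer handed in by the caller, the TPROXY
lookup now snapshots the TCP header with skb_header_pointer(), which
copies into a stack buffer whenever the header is not contiguous in the
linear area:

        struct tcphdr _hdr, *hp;

        hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
        if (!hp)                /* truncated packet */
                return NULL;
        /* hp is valid whether or not the header was linear */
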
index ce6f0d15b5dd5d8a9531a8316a932d3d30a3491b..afc307c89d1a977a00693999ec0f54b50005b7bd 100644 (file)
@@ -1334,7 +1334,7 @@ void raw6_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-/* Same as inet6_dgram_ops, sans udp_poll_mask.  */
+/* Same as inet6_dgram_ops, sans udp_poll.  */
 const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = datagram_poll_mask,        /* ok           */
+       .poll              = datagram_poll,             /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 86a0e4333d42212d03f53e0d54fcf4e03a328607..7208c16302f61adc15636f6a332ff0c02325cfcf 100644 (file)
@@ -972,18 +972,15 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
        rt->dst.lastuse = jiffies;
 }
 
+/* Caller must already hold a reference to @from */
 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 {
        rt->rt6i_flags &= ~RTF_EXPIRES;
-       fib6_info_hold(from);
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-       if (from->fib6_metrics != &dst_default_metrics) {
-               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-               refcount_inc(&from->fib6_metrics->refcnt);
-       }
 }
 
+/* Caller must already hold a reference to @ort */
 static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
 {
        struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1041,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct net_device *dev = rt->fib6_nh.nh_dev;
        struct rt6_info *nrt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (nrt)
                ip6_rt_copy_init(nrt, rt);
+       else
+               fib6_info_release(rt);
 
        return nrt;
 }
@@ -1178,10 +1180,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
         *      Clone the route.
         */
 
+       if (!fib6_info_hold_safe(ort))
+               return NULL;
+
        dev = ip6_rt_get_dev_rcu(ort);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
-       if (!rt)
+       if (!rt) {
+               fib6_info_release(ort);
                return NULL;
+       }
 
        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1217,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
        struct net_device *dev;
        struct rt6_info *pcpu_rt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
        rcu_read_unlock();
-       if (!pcpu_rt)
+       if (!pcpu_rt) {
+               fib6_info_release(rt);
                return NULL;
+       }
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
@@ -2486,7 +2498,7 @@ restart:
 
 out:
        if (ret)
-               dst_hold(&ret->dst);
+               ip6_hold_safe(net, &ret, true);
        else
                ret = ip6_create_rt_rcu(rt);
 
@@ -3303,7 +3315,8 @@ static int ip6_route_del(struct fib6_config *cfg,
                                continue;
                        if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
                                continue;
-                       fib6_info_hold(rt);
+                       if (!fib6_info_hold_safe(rt))
+                               continue;
                        rcu_read_unlock();
 
                        /* if gateway was specified only delete the one hop */
@@ -3409,6 +3422,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
        rcu_read_lock();
        from = rcu_dereference(rt->from);
+       /* This fib6_info_hold() is safe here because we hold a reference
+        * to rt, and rt already holds a reference to the fib6_info.
+        */
        fib6_info_hold(from);
        rcu_read_unlock();
 
@@ -3470,7 +3486,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
                        continue;
                if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
@@ -3530,8 +3547,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
                    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
                        break;
        }
-       if (rt)
-               fib6_info_hold(rt);
+       if (rt && !fib6_info_hold_safe(rt))
+               rt = NULL;
        rcu_read_unlock();
        return rt;
 }
@@ -3579,8 +3596,8 @@ restart:
                struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
                if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-                   (!idev || idev->cnf.accept_ra != 2)) {
-                       fib6_info_hold(rt);
+                   (!idev || idev->cnf.accept_ra != 2) &&
+                   fib6_info_hold_safe(rt)) {
                        rcu_read_unlock();
                        ip6_del_rt(net, rt);
                        goto restart;
@@ -3842,7 +3859,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
                        lockdep_is_held(&rt->fib6_table->tb6_lock));
        while (iter) {
                if (iter->fib6_metric == rt->fib6_metric &&
-                   iter->fib6_nsiblings)
+                   rt6_qualify_for_ecmp(iter))
                        return iter;
                iter = rcu_dereference_protected(iter->fib6_next,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -4388,6 +4405,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                        rt = NULL;
                        goto cleanup;
                }
+               if (!rt6_qualify_for_ecmp(rt)) {
+                       err = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "Device only routes can not be added for IPv6 using the multipath API.");
+                       fib6_info_release(rt);
+                       goto cleanup;
+               }
 
                rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
 
@@ -4439,7 +4463,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 */
                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
                                                     NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND;
                nhn++;
        }
 
index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..558fe8cc6d43858ca828cbd8dc8ea65e63bc6602 100644 (file)
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+                       tfm = crypto_alloc_shash(algo->name, 0, 0);
                        if (IS_ERR(tfm))
                                return PTR_ERR(tfm);
                        p_tfm = per_cpu_ptr(algo->tfms, cpu);
index 19ccf0dc996ca7da1f47bd887b18e4755257e462..a8854dd3e9c5ef64a7a480bb6ff891fac0e6d1ea 100644 (file)
@@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
 
        if (do_flowlabel > 0) {
                hash = skb_get_hash(skb);
-               rol32(hash, 16);
+               hash = rol32(hash, 16);
                flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
                flowlabel = ip6_flowlabel(inner_hdr);
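
rol32() is a pure function, so the original call threw away the rotated
value and left hash unchanged, weakening the flow-label spread. The fix
assigns the result back:

        hash = skb_get_hash(skb);
        hash = rol32(hash, 16); /* rol32() returns; it does not mutate */
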
index 7efa9fd7e1094dc43ca464e5c6f06ea36031d476..03e6b7a2bc530d1a19c565f00a03575b898b6f88 100644 (file)
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
-                                          ntohs(th->source), tcp_v6_iif(skb),
+                                          ntohs(th->source),
+                                          tcp_v6_iif_l3_slave(skb),
                                           tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
                                            skb, __tcp_hdrlen(th),
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
-                                           ntohs(th->dest), tcp_v6_iif(skb),
+                                           ntohs(th->dest),
+                                           tcp_v6_iif_l3_slave(skb),
                                            sdif);
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
index 68e86257a549988b5f87098b24c8e3d0bd1dc1ce..893a022f962081416fa1b9e5f96416a8c2e92e5c 100644 (file)
@@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);
 
@@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = {
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
-       .poll_mask      = iucv_sock_poll_mask,
+       .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 84b7d5c6fec81a7c62ed4744d48726dee8c7e426..d3601d421571b9825ff0a6cea9b75cb52fd51dea 100644 (file)
@@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
        struct list_head *head;
        int index = 0;
 
-       /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state,
-        * so  we set sk_state, otherwise epoll_wait always returns right away
-        * with EPOLLHUP
+       /* For the SOCK_SEQPACKET sock type, datagram_poll checks sk_state,
+        * so we set it here; otherwise epoll_wait always returns right away
+        * with EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;
 
@@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 8bdc1cbe490a4ae819db32851ea6a8184b0727b0..5e1d2946ffbf2a2cf4e65db44658c7f374e72e25 100644 (file)
@@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = {
 
        /* Now the operations that really occur. */
        .release        =       pfkey_release,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .sendmsg        =       pfkey_sendmsg,
        .recvmsg        =       pfkey_recvmsg,
 };
index 181073bf69251392c3a7fd23197a278f37dd67f0..a9c05b2bc1b0bc3471bbf62dc3b7c11e971a7f08 100644 (file)
@@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 336e4c00abbcdaef7385c90e24d2088131efe095..957369192ca181d6da21c9dda03d0e8a9726643e 100644 (file)
@@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip6_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 55188382845c310c98eb86cdfc3b78e1d03e8e0f..cf6cca260e7b5b42ef72a3fe99c6f0c5ad08bb8a 100644 (file)
@@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
                                l2tp_session_get(sock_net(sk), tunnel,
                                                 stats.session_id);
 
-                       if (session && session->pwtype == L2TP_PWTYPE_PPP) {
-                               err = pppol2tp_session_ioctl(session, cmd,
-                                                            arg);
+                       if (!session) {
+                               err = -EBADR;
+                               break;
+                       }
+                       if (session->pwtype != L2TP_PWTYPE_PPP) {
                                l2tp_session_dec_refcount(session);
-                       } else {
                                err = -EBADR;
+                               break;
                        }
+
+                       err = pppol2tp_session_ioctl(session, cmd, arg);
+                       l2tp_session_dec_refcount(session);
                        break;
                }
 #ifdef CONFIG_XFRM
@@ -1818,7 +1823,7 @@ static const struct proto_ops pppol2tp_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppol2tp_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = pppol2tp_setsockopt,
index 804de84901868a4cffd2ec5d6c9e979af937cb59..1beeea9549fa6ec1f7b0e5f9af8ff3250a316f59 100644 (file)
@@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = {
        .socketpair  = sock_no_socketpair,
        .accept      = llc_ui_accept,
        .getname     = llc_ui_getname,
-       .poll_mask   = datagram_poll_mask,
+       .poll        = datagram_poll,
        .ioctl       = llc_ui_ioctl,
        .listen      = llc_ui_listen,
        .shutdown    = llc_ui_shutdown,
index 89041260784c0871195556074f7138eee01edb33..260b3dc1b4a2ab4545982b0b36478d6a6cf0b71f 100644 (file)
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 
        rcu_read_lock_bh();
        sap = __llc_sap_find(sap_value);
-       if (sap)
-               llc_sap_hold(sap);
+       if (!sap || !llc_sap_hold_safe(sap))
+               sap = NULL;
        rcu_read_unlock_bh();
        return sap;
 }
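
Same lookup-under-RCU hazard as the fib6_info changes earlier: the SAP's
refcount may hit zero between __llc_sap_find() and the hold. The matching
helper is presumably the inc-not-zero idiom again (a sketch, assuming a
refcount_t refcnt field):

        static inline bool llc_sap_hold_safe(struct llc_sap *sap)
        {
                return refcount_inc_not_zero(&sap->refcnt);
        }
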
index 0a38cc1cbebcee97ed7e8779ab487e2e0943e84c..932985ca4e66829ffa559fac1a10243e93043101 100644 (file)
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                     sdata->control_port_over_nl80211)) {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
                bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
-               struct ethhdr *ehdr = eth_hdr(skb);
 
-               cfg80211_rx_control_port(dev, skb->data, skb->len,
-                                        ehdr->h_source,
-                                        be16_to_cpu(skb->protocol), noencrypt);
+               cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
                /* deliver to local stack */
index 44b5dfe8727d936d39338006bc89b125c848d12b..fa1f1e63a2640fd405e42e5aeae9718b4ef12d2a 100644 (file)
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
+       local_bh_disable();
        __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+       local_bh_enable();
 
        return 0;
 }
index 5e2e511c4a6f69cf0b613c1b3facd0665d672cfd..d02fbfec37835bce6a27ecfdc146b95ba0ca077f 100644 (file)
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                if (!sta->uploaded)
                        continue;
 
-               if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+               if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
                        continue;
 
                for (state = IEEE80211_STA_NOTEXIST;
index e7b05de1e6d1e136eb509293c4fde81468e12642..25e483e8278bd0404bf044c1a1748fdd1db77580 100644 (file)
@@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
        ncm->data[2] = data;
        ncm->data[4] = ntohl(lsc->oem_status);
 
-       netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
-                   nc->id, data & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+                  nc->id, data & 0x1 ? "up" : "down");
 
        chained = !list_empty(&nc->link);
        state = nc->state;
@@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
        hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
        ncm->data[3] = ntohl(hncdsc->status);
        spin_unlock_irqrestore(&nc->lock, flags);
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: host driver %srunning on channel %u\n",
-                     ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: host driver %srunning on channel %u\n",
+                  ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
        return 0;
 }
index 5561e221b71f10b223b381c2ed4b0752bedbc225..091284760d21fa02dc0f9997a2c68ce7f1f618e6 100644 (file)
@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                }
                break;
        case ncsi_dev_state_config_done:
-               netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                             "NCSI: channel %u config done\n", nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+                          nc->id);
                spin_lock_irqsave(&nc->lock, flags);
                if (nc->reconfigure_needed) {
                        /* This channel's configuration has been updated
@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "Dirty NCSI channel state reset\n");
+                       netdev_dbg(dev, "Dirty NCSI channel state reset\n");
                        ncsi_process_next_channel(ndp);
                        break;
                }
@@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                } else {
                        hot_nc = NULL;
                        nc->state = NCSI_CHANNEL_INACTIVE;
-                       netdev_warn(ndp->ndev.dev,
-                                   "NCSI: channel %u link down after config\n",
-                                   nc->id);
+                       netdev_dbg(ndp->ndev.dev,
+                                  "NCSI: channel %u link down after config\n",
+                                  nc->id);
                }
                spin_unlock_irqrestore(&nc->lock, flags);
 
@@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
        }
 
        ncm = &found->modes[NCSI_MODE_LINK];
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: Channel %u added to queue (link %s)\n",
-                     found->id, ncm->data[2] & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: Channel %u added to queue (link %s)\n",
+                  found->id, ncm->data[2] & 0x1 ? "up" : "down");
 
 out:
        spin_lock_irqsave(&ndp->lock, flags);
@@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
        switch (old_state) {
        case NCSI_CHANNEL_INACTIVE:
                ndp->ndev.state = ncsi_dev_state_config;
-               netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+                          nc->id);
                ncsi_configure_channel(ndp);
                break;
        case NCSI_CHANNEL_ACTIVE:
                ndp->ndev.state = ncsi_dev_state_suspend;
-               netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+                          nc->id);
                ncsi_suspend_channel(ndp);
                break;
        default:
@@ -1226,8 +1225,6 @@ out:
                return ncsi_choose_active_channel(ndp);
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: No more channels to process\n");
        ncsi_report_link(ndp, false);
        return -ENODEV;
 }
@@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                                if ((ndp->ndev.state & 0xff00) ==
                                                ncsi_dev_state_config ||
                                                !list_empty(&nc->link)) {
-                                       netdev_printk(KERN_DEBUG, nd->dev,
-                                                     "NCSI: channel %p marked dirty\n",
-                                                     nc);
+                                       netdev_dbg(nd->dev,
+                                                  "NCSI: channel %p marked dirty\n",
+                                                  nc);
                                        nc->reconfigure_needed = true;
                                }
                                spin_unlock_irqrestore(&nc->lock, flags);
@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, nd->dev,
-                                     "NCSI: kicked channel %p\n", nc);
+                       netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
                        n++;
                }
        }
@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
                n_vids++;
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u already registered\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u already registered\n",
+                                  vid);
                        return 0;
                }
        }
@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        vlan->vid = vid;
        list_add_rcu(&vlan->list, &ndp->vlan_vids);
 
-       netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+       netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
 
        found = ncsi_kick_channels(ndp) != 0;
 
@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        /* Remove the VLAN id from our internal list */
        list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u found, removing\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
                        list_del_rcu(&vlan->list);
                        found = true;
                        kfree(vlan);
@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
                }
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+       netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
        ncsi_report_link(ndp, true);
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
index dbd7d1fad277ebe3fb09f7ec68f7178433a9c438..f0a1c536ef15a0d35a3078bf85b5f4bee704f894 100644 (file)
@@ -460,6 +460,13 @@ config NF_TABLES
 
 if NF_TABLES
 
+config NF_TABLES_SET
+       tristate "Netfilter nf_tables set infrastructure"
+       help
+         This option enables the nf_tables set infrastructure, which allows
+         looking up elements in a set and building one-way mappings between
+         matches and actions.
+
 config NF_TABLES_INET
        depends on IPV6
        select NF_TABLES_IPV4
@@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD
          This option adds the "flow_offload" expression that you can use to
          choose what flows are placed into the hardware.
 
-config NFT_SET_RBTREE
-       tristate "Netfilter nf_tables rbtree set module"
-       help
-         This option adds the "rbtree" set type (Red Black tree) that is used
-         to build interval-based sets.
-
-config NFT_SET_HASH
-       tristate "Netfilter nf_tables hash set module"
-       help
-         This option adds the "hash" set type that is used to build one-way
-         mappings between matchings and actions.
-
-config NFT_SET_BITMAP
-       tristate "Netfilter nf_tables bitmap set module"
-       help
-         This option adds the "bitmap" set type that is used to build sets
-         whose keys are smaller or equal to 16 bits.
-
 config NFT_COUNTER
        tristate "Netfilter nf_tables counter module"
        help
index 44449389e527b082b9ea171d5c1759b7c7c7f227..8a76dced974d1c10eca35dca78cf2ab284cb2490 100644 (file)
@@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
                  nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
 
+nf_tables_set-objs := nf_tables_set_core.o \
+                     nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
+
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
+obj-$(CONFIG_NF_TABLES_SET)    += nf_tables_set.o
 obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
 obj-$(CONFIG_NFT_CONNLIMIT)    += nft_connlimit.o
 obj-$(CONFIG_NFT_NUMGEN)       += nft_numgen.o
@@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE)               += nft_queue.o
 obj-$(CONFIG_NFT_QUOTA)                += nft_quota.o
 obj-$(CONFIG_NFT_REJECT)       += nft_reject.o
 obj-$(CONFIG_NFT_REJECT_INET)  += nft_reject_inet.o
-obj-$(CONFIG_NFT_SET_RBTREE)   += nft_set_rbtree.o
-obj-$(CONFIG_NFT_SET_HASH)     += nft_set_hash.o
-obj-$(CONFIG_NFT_SET_BITMAP)   += nft_set_bitmap.o
 obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
 obj-$(CONFIG_NFT_LOG)          += nft_log.o
 obj-$(CONFIG_NFT_MASQ)         += nft_masq.o
index d8383609fe2825b707cfb8ebc54381761ccc1108..510039862aa93c99904d2dbd3a7969327d0d896a 100644 (file)
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
+       int                             cpu;
+       u32                             jiffies32;
 };
 
 struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
+       conn->cpu = raw_smp_processor_id();
+       conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
 
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+       const struct nf_conntrack_tuple_hash *found;
+       unsigned long a, b;
+       int cpu = raw_smp_processor_id();
+       __s32 age;
+
+       found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+       if (found)
+               return found;
+       b = conn->jiffies32;
+       a = (u32)jiffies;
+
+       /* conn might have been added just before by another cpu and
+        * might still be unconfirmed.  In this case, nf_conntrack_find()
+        * returns no result.  Thus only evict if this cpu added the
+        * stale entry or if the entry is older than two jiffies.
+        */
+       age = a - b;
+       if (conn->cpu == cpu || age >= 2) {
+               hlist_del(&conn->node);
+               kmem_cache_free(conncount_conn_cachep, conn);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return ERR_PTR(-EAGAIN);
+}
+
 unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn;
-       struct hlist_node *n;
        struct nf_conn *found_ct;
+       struct hlist_node *n;
        unsigned int length = 0;
 
        *addit = tuple ? true : false;
 
        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
-               found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
-               if (found == NULL) {
-                       hlist_del(&conn->node);
-                       kmem_cache_free(conncount_conn_cachep, conn);
+               found = find_or_evict(net, conn);
+               if (IS_ERR(found)) {
+                       /* Not found, but might be about to be confirmed */
+                       if (PTR_ERR(found) == -EAGAIN) {
+                               length++;
+                               if (!tuple)
+                                       continue;
+
+                               if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+                                   nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+                                   nf_ct_zone_id(zone, zone->dir))
+                                       *addit = false;
+                       }
                        continue;
                }
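
The find_or_evict() helper added above leans on unsigned 32-bit arithmetic: the add-time is stored as a u32 and subtracted from the current jiffies before the result is interpreted as signed, so the computed age stays correct across a jiffies wraparound. A standalone sketch of that arithmetic (values and names are illustrative, not from the patch):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t added = 0xfffffffeu; /* jiffies32 saved at add time */
                uint32_t now   = 0x00000001u; /* current jiffies, after wrap */
                int32_t  age   = (int32_t)(now - added);

                /* prints age = 3: the unsigned subtraction absorbs the wrap */
                printf("age = %d\n", age);
                return 0;
        }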
 
index 3465da2a98bd4ff68fc8e52935aad047c69855e8..3d52804250274602c521f3cfe6c0c3b8fa9e78e9 100644 (file)
@@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
                return -EOPNOTSUPP;
 
        /* On boot, we can set this without any fancy locking. */
-       if (!nf_conntrack_htable_size)
+       if (!nf_conntrack_hash)
                return param_set_uint(val, kp);
 
        rc = kstrtouint(val, 0, &hashsize);
index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 (file)
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 
        nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
        nf_ct_iterate_destroy(unhelp, me);
+
+       /* A concurrent reader may still be using a helper looked up
+        * during the unhelp pass above, so wait for an RCU grace period.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
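
The synchronize_rcu() added here is the standard unpublish-then-wait teardown: lookups run under rcu_read_lock(), so once the helper is unreachable, one grace period guarantees no reader still holds a pointer to it. A schematic kernel-style sketch of the pattern (slot, obj and use() are hypothetical, not names from the conntrack code):

        rcu_read_lock();                /* reader side, e.g. a lookup */
        obj = rcu_dereference(slot);
        if (obj)
                use(obj);
        rcu_read_unlock();

        rcu_assign_pointer(slot, NULL); /* writer side: unpublish ... */
        synchronize_rcu();              /* ... wait out all readers ... */
        kfree(obj);                     /* ... then freeing is safe */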
 
index abe647d5b8c63256da8895388363ad9dde11edc0..9ce6336d1e559459235f755be7db4a7adb9ebfc1 100644 (file)
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
        [CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
 };
index 4264570475788be388e603c1bc70330c812d0eb3..a61d6df6e5f64f5b2086d14f35c88a0491f77ce6 100644 (file)
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
        if (write) {
                struct ctl_table tmp = *table;
 
+               /* proc_dostring() can append to existing strings, so we need to
+                * initialize it as an empty string.
+                */
+               buf[0] = '\0';
                tmp.data = buf;
                r = proc_dostring(&tmp, write, buffer, lenp, ppos);
                if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               struct ctl_table tmp = *table;
+
+               tmp.data = buf;
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       strlcpy(buf, "NONE", sizeof(buf));
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       strlcpy(buf, logger->name, sizeof(buf));
                mutex_unlock(&nf_log_mutex);
+               r = proc_dostring(&tmp, write, buffer, lenp, ppos);
        }
 
        return r;
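
The rework above confines user-memory access to outside the mutex: both paths point a temporary ctl_table at a stack buffer, the read path snapshots the logger name (or "NONE") while the lock is held, and proc_dostring() runs only after the unlock, presumably so a fault on the userspace buffer cannot sleep inside nf_log_mutex. A schematic sketch of the resulting read-path shape (lock and shared_name are illustrative):

        struct ctl_table tmp = *table;
        char buf[64];
        int r;

        tmp.data = buf;
        mutex_lock(&lock);              /* snapshot under the lock ... */
        strlcpy(buf, shared_name ? shared_name : "NONE", sizeof(buf));
        mutex_unlock(&lock);
        /* ... and copy to userspace without it */
        r = proc_dostring(&tmp, write, buffer, lenp, ppos);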
index 896d4a36081d4bb527b10c5db27df1a4dab32df8..f5745e4c6513e7a6bc8d1814e6efb3f497f76870 100644 (file)
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 {
        ctx->net        = net;
        ctx->family     = family;
+       ctx->level      = 0;
        ctx->table      = table;
        ctx->chain      = chain;
        ctx->nla        = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        struct nft_base_chain *basechain;
        struct nft_stats *stats = NULL;
        struct nft_chain_hook hook;
-       const struct nlattr *name;
        struct nf_hook_ops *ops;
        struct nft_trans *trans;
        int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return PTR_ERR(stats);
        }
 
+       err = -ENOMEM;
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
                                sizeof(struct nft_trans_chain));
-       if (trans == NULL) {
-               free_percpu(stats);
-               return -ENOMEM;
-       }
+       if (trans == NULL)
+               goto err;
 
        nft_trans_chain_stats(trans) = stats;
        nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        else
                nft_trans_chain_policy(trans) = -1;
 
-       name = nla[NFTA_CHAIN_NAME];
-       if (nla[NFTA_CHAIN_HANDLE] && name) {
-               nft_trans_chain_name(trans) =
-                       nla_strdup(name, GFP_KERNEL);
-               if (!nft_trans_chain_name(trans)) {
-                       kfree(trans);
-                       free_percpu(stats);
-                       return -ENOMEM;
+       if (nla[NFTA_CHAIN_HANDLE] &&
+           nla[NFTA_CHAIN_NAME]) {
+               struct nft_trans *tmp;
+               char *name;
+
+               err = -ENOMEM;
+               name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+               if (!name)
+                       goto err;
+
+               err = -EEXIST;
+               list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
+                       if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
+                           tmp->ctx.table == table &&
+                           nft_trans_chain_update(tmp) &&
+                           nft_trans_chain_name(tmp) &&
+                           strcmp(name, nft_trans_chain_name(tmp)) == 0) {
+                               kfree(name);
+                               goto err;
+                       }
                }
+
+               nft_trans_chain_name(trans) = name;
        }
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
+err:
+       free_percpu(stats);
+       kfree(trans);
+       return err;
 }
 
 static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_rules_start(struct netlink_callback *cb)
+{
+       const struct nlattr * const *nla = cb->data;
+       struct nft_rule_dump_ctx *ctx = NULL;
+
+       if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
+               ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+               if (!ctx)
+                       return -ENOMEM;
+
+               if (nla[NFTA_RULE_TABLE]) {
+                       ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
+                                                       GFP_ATOMIC);
+                       if (!ctx->table) {
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+               if (nla[NFTA_RULE_CHAIN]) {
+                       ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
+                                               GFP_ATOMIC);
+                       if (!ctx->chain) {
+                               kfree(ctx->table);
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       cb->data = ctx;
+       return 0;
+}
+
 static int nf_tables_dump_rules_done(struct netlink_callback *cb)
 {
        struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_rules_start,
                        .dump = nf_tables_dump_rules,
                        .done = nf_tables_dump_rules_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
-                       struct nft_rule_dump_ctx *ctx;
-
-                       ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
-                       if (!ctx)
-                               return -ENOMEM;
-
-                       if (nla[NFTA_RULE_TABLE]) {
-                               ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
-                                                       GFP_ATOMIC);
-                               if (!ctx->table) {
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       if (nla[NFTA_RULE_CHAIN]) {
-                               ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
-                                                       GFP_ATOMIC);
-                               if (!ctx->chain) {
-                                       kfree(ctx->table);
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       c.data = ctx;
-               }
-
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
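
This is the first of several conversions in this file (sets, set elements, objects and flowtables follow the same shape): the per-dump context moves out of the request handler and into the netlink_dump_control ->start callback. If the dump never starts, nothing has been allocated; if ->start succeeds, ->done is guaranteed to run and free what it set up, closing the leak window of allocating before netlink_dump_start(). A schematic sketch with hypothetical names:

        struct my_filter {
                char *table;
        };

        static int my_dump_start(struct netlink_callback *cb)
        {
                struct my_filter *f = kzalloc(sizeof(*f), GFP_ATOMIC);

                if (!f)
                        return -ENOMEM; /* dump aborted, nothing leaks */
                cb->data = f;           /* published only on success */
                return 0;
        }

        static int my_dump_done(struct netlink_callback *cb)
        {
                struct my_filter *f = cb->data;

                if (f) {
                        kfree(f->table);
                        kfree(f);
                }
                return 0;
        }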
 
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
        struct nft_rule *rule;
        int err;
 
+       if (ctx->level == NFT_JUMP_STACK_SIZE)
+               return -EMLINK;
+
        list_for_each_entry(rule, &chain->rules, list) {
                if (!nft_is_active_next(ctx->net, rule))
                        continue;
@@ -3161,6 +3189,18 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_sets_start(struct netlink_callback *cb)
+{
+       struct nft_ctx *ctx_dump = NULL;
+
+       ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
+       if (ctx_dump == NULL)
+               return -ENOMEM;
+
+       cb->data = ctx_dump;
+       return 0;
+}
+
 static int nf_tables_dump_sets_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_sets_start,
                        .dump = nf_tables_dump_sets,
                        .done = nf_tables_dump_sets_done,
+                       .data = &ctx,
                        .module = THIS_MODULE,
                };
-               struct nft_ctx *ctx_dump;
-
-               ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
-               if (ctx_dump == NULL)
-                       return -ENOMEM;
-
-               *ctx_dump = ctx;
-               c.data = ctx_dump;
 
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
@@ -3849,6 +3883,15 @@ nla_put_failure:
        return -ENOSPC;
 }
 
+static int nf_tables_dump_set_start(struct netlink_callback *cb)
+{
+       struct nft_set_dump_ctx *dump_ctx = cb->data;
+
+       cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
+
+       return cb->data ? 0 : -ENOMEM;
+}
+
 static int nf_tables_dump_set_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_set_start,
                        .dump = nf_tables_dump_set,
                        .done = nf_tables_dump_set_done,
                        .module = THIS_MODULE,
                };
-               struct nft_set_dump_ctx *dump_ctx;
-
-               dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC);
-               if (!dump_ctx)
-                       return -ENOMEM;
-
-               dump_ctx->set = set;
-               dump_ctx->ctx = ctx;
+               struct nft_set_dump_ctx dump_ctx = {
+                       .set = set,
+                       .ctx = ctx,
+               };
 
-               c.data = dump_ctx;
+               c.data = &dump_ctx;
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -4975,38 +5015,42 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+static int nf_tables_dump_obj_start(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_obj_filter *filter = NULL;
 
-       if (filter) {
-               kfree(filter->table);
-               kfree(filter);
+       if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
+
+               if (nla[NFTA_OBJ_TABLE]) {
+                       filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+                       if (!filter->table) {
+                               kfree(filter);
+                               return -ENOMEM;
+                       }
+               }
+
+               if (nla[NFTA_OBJ_TYPE])
+                       filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
        }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_obj_filter *
-nft_obj_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter;
-
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
-       if (!filter)
-               return ERR_PTR(-ENOMEM);
+       struct nft_obj_filter *filter = cb->data;
 
-       if (nla[NFTA_OBJ_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
+       if (filter) {
+               kfree(filter->table);
+               kfree(filter);
        }
-       if (nla[NFTA_OBJ_TYPE])
-               filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
 
-       return filter;
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_obj_start,
                        .dump = nf_tables_dump_obj,
                        .done = nf_tables_dump_obj_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_OBJ_TABLE] ||
-                   nla[NFTA_OBJ_TYPE]) {
-                       struct nft_obj_filter *filter;
-
-                       filter = nft_obj_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
                flowtable->ops[i].priv          = &flowtable->data;
                flowtable->ops[i].hook          = flowtable->data.type->hook;
                flowtable->ops[i].dev           = dev_array[i];
-               flowtable->dev_name[i]          = kstrdup(dev_array[i]->name,
-                                                         GFP_KERNEL);
        }
 
        return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 err6:
        i = flowtable->ops_len;
 err5:
-       for (k = i - 1; k >= 0; k--) {
-               kfree(flowtable->dev_name[k]);
+       for (k = i - 1; k >= 0; k--)
                nf_unregister_net_hook(net, &flowtable->ops[k]);
-       }
 
        kfree(flowtable->ops);
 err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        for (i = 0; i < flowtable->ops_len; i++) {
-               if (flowtable->dev_name[i][0] &&
-                   nla_put_string(skb, NFTA_DEVICE_NAME,
-                                  flowtable->dev_name[i]))
+               const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
+
+               if (dev &&
+                   nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
                        goto nla_put_failure;
        }
        nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_flowtable_filter *filter = NULL;
 
-       if (!filter)
-               return 0;
+       if (nla[NFTA_FLOWTABLE_TABLE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
 
-       kfree(filter->table);
-       kfree(filter);
+               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+                                          GFP_ATOMIC);
+               if (!filter->table) {
+                       kfree(filter);
+                       return -ENOMEM;
+               }
+       }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_flowtable_filter *
-nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter;
+       struct nft_flowtable_filter *filter = cb->data;
 
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
        if (!filter)
-               return ERR_PTR(-ENOMEM);
+               return 0;
 
-       if (nla[NFTA_FLOWTABLE_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
-                                          GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       return filter;
+       kfree(filter->table);
+       kfree(filter);
+
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_flowtable_start,
                        .dump = nf_tables_dump_flowtable,
                        .done = nf_tables_dump_flowtable_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_FLOWTABLE_TABLE]) {
-                       struct nft_flowtable_filter *filter;
-
-                       filter = nft_flowtable_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
        kfree(flowtable->name);
        flowtable->data.type->free(&flowtable->data);
        module_put(flowtable->data.type->owner);
+       kfree(flowtable);
 }
 
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                        continue;
 
                nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
-               flowtable->dev_name[i][0] = '\0';
                flowtable->ops[i].dev = NULL;
                break;
        }
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
        case NFT_MSG_DELTABLE:
                nf_tables_table_destroy(&trans->ctx);
                break;
+       case NFT_MSG_NEWCHAIN:
+               kfree(nft_trans_chain_name(trans));
+               break;
        case NFT_MSG_DELCHAIN:
                nf_tables_chain_destroy(&trans->ctx);
                break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
                        break;
                case NFT_MSG_NEWCHAIN:
-                       if (nft_trans_chain_update(trans))
+                       if (nft_trans_chain_update(trans)) {
                                nft_chain_commit_update(trans);
-                       else
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               /* trans destroyed after rcu grace period */
+                       } else {
                                nft_clear(net, trans->ctx.chain);
-
-                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
-                       nft_trans_destroy(trans);
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               nft_trans_destroy(trans);
+                       }
                        break;
                case NFT_MSG_DELCHAIN:
                        nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
                case NFT_MSG_NEWCHAIN:
                        if (nft_trans_chain_update(trans)) {
                                free_percpu(nft_trans_chain_stats(trans));
-
+                               kfree(nft_trans_chain_name(trans));
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
                        err = nf_tables_check_loops(ctx, data->verdict.chain);
                        if (err < 0)
                                return err;
-
-                       if (ctx->chain->level + 1 >
-                           data->verdict.chain->level) {
-                               if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
-                                       return -EMLINK;
-                               data->verdict.chain->level = ctx->chain->level + 1;
-                       }
                }
 
                return 0;
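
Together with the ctx->level initialisation at the top of this file, the -EMLINK check in nft_chain_validate() replaces the per-chain level bookkeeping deleted just above: jump depth now lives in the validation context, incremented and decremented around each recursion by the nft_immediate and nft_lookup hunks below. A standalone, simplified model of the bound (hypothetical types; JUMP_STACK_SIZE stands in for NFT_JUMP_STACK_SIZE):

        #include <errno.h>

        #define JUMP_STACK_SIZE 16

        struct chain {
                struct chain *jump_target; /* one jump per chain, for brevity */
        };

        static int validate_chain(const struct chain *c, int level)
        {
                if (level == JUMP_STACK_SIZE)
                        return -EMLINK;    /* jump stack exhausted */
                if (c->jump_target)
                        return validate_chain(c->jump_target, level + 1);
                return 0;
        }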
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
new file mode 100644 (file)
index 0000000..8147896
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netfilter/nf_tables_core.h>
+
+static int __init nf_tables_set_module_init(void)
+{
+       nft_register_set(&nft_set_hash_fast_type);
+       nft_register_set(&nft_set_hash_type);
+       nft_register_set(&nft_set_rhash_type);
+       nft_register_set(&nft_set_bitmap_type);
+       nft_register_set(&nft_set_rbtree_type);
+
+       return 0;
+}
+
+static void __exit nf_tables_set_module_exit(void)
+{
+       nft_unregister_set(&nft_set_rbtree_type);
+       nft_unregister_set(&nft_set_bitmap_type);
+       nft_unregister_set(&nft_set_rhash_type);
+       nft_unregister_set(&nft_set_hash_type);
+       nft_unregister_set(&nft_set_hash_fast_type);
+}
+
+module_init(nf_tables_set_module_init);
+module_exit(nf_tables_set_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFT_SET();
index 4ccd2988f9db637166358335d8e26299c7237bec..ea4ba551abb28cb25c833dc408e23d1313b21bb4 100644 (file)
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+       [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+       [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+       [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
index 8d1ff654e5aff1dfd5c2ace7693876568ea3377a..32535eea51b296ab1f2cb5bdd06972497f380a78 100644 (file)
@@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
        family = ctx->family;
 
+       if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
+           strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
+           strcmp(tg_name, "standard") == 0)
+               return ERR_PTR(-EINVAL);
+
        /* Re-use the existing target if it's already loaded. */
        list_for_each_entry(nft_target, &nft_target_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
+               if (!target->target)
+                       continue;
+
                if (nft_target_cmp(target, tg_name, rev, family))
                        return &nft_target->ops;
        }
@@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        if (IS_ERR(target))
                return ERR_PTR(-ENOENT);
 
+       if (!target->target) {
+               err = -EINVAL;
+               goto err;
+       }
+
        if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
                err = -EINVAL;
                goto err;
index 15adf8ca82c3783efcb510efa85aa894afd1c2da..0777a93211e2b576e57eec2f4aaec71d57f3700d 100644 (file)
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
                                  const struct nft_data **d)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
        int err;
 
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
+               pctx->level++;
                err = nft_chain_validate(ctx, data->verdict.chain);
                if (err < 0)
                        return err;
+               pctx->level--;
                break;
        default:
                break;
index 42e6fadf1417eba7ce4512d43cce339fc627e204..c2a1d84cdfc460d86b50ae6d28dde2653f1666dd 100644 (file)
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
+       int err;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
            *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
-               return nft_chain_validate(ctx, data->verdict.chain);
+               pctx->level++;
+               err = nft_chain_validate(ctx, data->verdict.chain);
+               if (err < 0)
+                       return err;
+               pctx->level--;
+               break;
        default:
-               return 0;
+               break;
        }
+
+       return 0;
 }
 
 static int nft_lookup_validate(const struct nft_ctx *ctx,
index d6626e01c7ee6b0c25a2197f75309030edca34c6..128bc16f52dd436aa78ac21ae45be4cf69a70f00 100644 (file)
@@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_bitmap_type __read_mostly = {
+struct nft_set_type nft_set_bitmap_type __read_mostly = {
        .owner          = THIS_MODULE,
        .ops            = {
                .privsize       = nft_bitmap_privsize,
@@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = {
                .get            = nft_bitmap_get,
        },
 };
-
-static int __init nft_bitmap_module_init(void)
-{
-       return nft_register_set(&nft_bitmap_type);
-}
-
-static void __exit nft_bitmap_module_exit(void)
-{
-       nft_unregister_set(&nft_bitmap_type);
-}
-
-module_init(nft_bitmap_module_init);
-module_exit(nft_bitmap_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_SET();
index 6f9a1365a09f07c517804cec45e31cd657f93337..90c3e7e6cacba2878d36209c0b2cf76b9d2f5c82 100644 (file)
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
        struct nft_rhash *priv = nft_set_priv(set);
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
                                    (void *)set);
 }
@@ -654,7 +655,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
        return true;
 }
 
-static struct nft_set_type nft_rhash_type __read_mostly = {
+struct nft_set_type nft_set_rhash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT |
                          NFT_SET_TIMEOUT | NFT_SET_EVAL,
@@ -677,7 +678,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_type __read_mostly = {
+struct nft_set_type nft_set_hash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -697,7 +698,7 @@ static struct nft_set_type nft_hash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_fast_type __read_mostly = {
+struct nft_set_type nft_set_hash_fast_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -716,26 +717,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = {
                .get            = nft_hash_get,
        },
 };
-
-static int __init nft_hash_module_init(void)
-{
-       if (nft_register_set(&nft_hash_fast_type) ||
-           nft_register_set(&nft_hash_type) ||
-           nft_register_set(&nft_rhash_type))
-               return 1;
-       return 0;
-}
-
-static void __exit nft_hash_module_exit(void)
-{
-       nft_unregister_set(&nft_rhash_type);
-       nft_unregister_set(&nft_hash_type);
-       nft_unregister_set(&nft_hash_fast_type);
-}
-
-module_init(nft_hash_module_init);
-module_exit(nft_hash_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
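
The rcu_barrier() inserted into nft_rhash_destroy() above (and into the rbtree destroy path below) is deliberately not synchronize_rcu(): the garbage collector frees elements via call_rcu(), and the destroy path must wait for those queued callbacks to have run, not merely for readers to drain, before tearing the structure down. Schematic sketch with hypothetical names:

        call_rcu(&elem->rcu, free_elem);    /* queued earlier by the GC */

        /* destroy path */
        cancel_delayed_work_sync(&gc_work); /* no new GC work ... */
        rcu_barrier();                      /* ... all queued free_elem() done */
        destroy_table();                    /* safe: nothing touches it now */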
index 7f3a9a211034b2dee751dd776e1b5f59db6c6b61..9873d734b49480ff0722ca73d18cf7ab774e98fb 100644 (file)
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
-                       goto out;
+                       break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
                        rbe = rb_entry(prev, struct nft_rbtree_elem, node);
                        atomic_dec(&set->nelems);
                        nft_set_gc_batch_add(gcb, rbe);
+                       prev = NULL;
                }
                node = rb_next(node);
+               if (!node)
+                       break;
        }
-out:
        if (gcb) {
                for (i = 0; i < gcb->head.cnt; i++) {
                        rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        struct rb_node *node;
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
@@ -462,7 +465,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_rbtree_type __read_mostly = {
+struct nft_set_type nft_set_rbtree_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
@@ -481,20 +484,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = {
                .get            = nft_rbtree_get,
        },
 };
-
-static int __init nft_rbtree_module_init(void)
-{
-       return nft_register_set(&nft_rbtree_type);
-}
-
-static void __exit nft_rbtree_module_exit(void)
-{
-       nft_unregister_set(&nft_rbtree_type);
-}
-
-module_init(nft_rbtree_module_init);
-module_exit(nft_rbtree_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
index 58fce4e749a97deb7f50ee96cb328d45624ccc8c..d76550a8b642aafd96853332d18db898e43ff587 100644 (file)
@@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+       sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                   iph->saddr, iph->daddr,
                                   hp->source, hp->dest,
                                   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
        else if (!sk)
                /* no, there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                           iph->saddr, laddr,
                                           hp->source, lport,
                                           skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto,
+       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
                                   &iph->saddr, &iph->daddr,
                                   hp->source, hp->dest,
                                   xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
        else if (!sk)
                /* no there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp,
+               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
                                           tproto, &iph->saddr, laddr,
                                           hp->source, lport,
                                           xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
index 1189b84413d5a8236f878a9cc99bcfa09368ec69..56704d95f82d27f5a2bc26714e5001f3868765b2 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/hash.h>
 #include <linux/genetlink.h>
 #include <linux/net_namespace.h>
+#include <linux/nospec.h>
 
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 
        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;
+       protocol = array_index_nospec(protocol, MAX_LINKS);
 
        netlink_lock_table();
 #ifdef CONFIG_MODULES
@@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        return err;
        }
 
+       if (nlk->ngroups == 0)
+               groups = 0;
+       else if (nlk->ngroups < 8*sizeof(groups))
+               groups &= (1UL << nlk->ngroups) - 1;
+
        bound = nlk->bound;
        if (bound) {
                /* Ensure nlk->portid is up-to-date. */
@@ -2658,7 +2665,7 @@ static const struct proto_ops netlink_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        netlink_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
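
Two hardenings land in af_netlink.c above. The array_index_nospec() call clamps the protocol index even on a speculatively executed path, so the bounds check cannot be Spectre-v1-bypassed before nl_table is indexed. The group clamp in netlink_bind() also minds C's shift rules: (1UL << n) - 1 is only defined for n smaller than the word width, so the mask is applied only when ngroups is below 8 * sizeof(groups), and all bits remain valid otherwise. A standalone sketch of the mask with illustrative values:

        #include <stdio.h>

        int main(void)
        {
                unsigned long groups = ~0UL; /* caller asked for every group */
                unsigned int ngroups = 17;   /* groups actually available */

                if (ngroups == 0)
                        groups = 0;
                else if (ngroups < 8 * sizeof(groups))
                        groups &= (1UL << ngroups) - 1;

                printf("%#lx\n", groups);    /* prints 0x1ffff */
                return 0;
        }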
index 93fbcafbf3886d34b0be87244c405b8319df89dd..03f37c4e64fe44cd822952225736084ad151b2e8 100644 (file)
@@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       nr_accept,
        .getname        =       nr_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       nr_ioctl,
        .listen         =       nr_listen,
        .shutdown       =       sock_no_shutdown,
index 2ceefa183ceed6ba3d06f2aae958104a514f2146..6a196e438b6c03d4c86e0a8a78af1c496a7e599b 100644 (file)
@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
                pr_debug("Fragment %zd bytes remaining %zd",
                         frag_len, remaining_len);
 
-               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
                                         frag_len + LLCP_HEADER_SIZE, &err);
                if (pdu == NULL) {
-                       pr_err("Could not allocate PDU\n");
-                       continue;
+                       pr_err("Could not allocate PDU (error=%d)\n", err);
+                       len -= remaining_len;
+                       if (len == 0)
+                               len = err;
+                       break;
                }
 
                pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
index ab5bb14b49af92241b12584925983de43b143bb7..ea0c0c6f187429426f4849347c09b847f0111fff 100644 (file)
@@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
+                                  poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        pr_debug("%p\n", sk);
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
@@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = llcp_sock_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = llcp_sock_listen,
        .shutdown       = sock_no_shutdown,
@@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
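
The poll_mask-to-poll revert recurs throughout this section, and the one functional difference is visible in llcp_sock_poll() above: a ->poll implementation must itself register the caller on the socket's wait queue via sock_poll_wait() before computing the ready mask, which the VFS previously did on behalf of ->poll_mask. A minimal sketch of that shape (my_sock_poll and the readability test are illustrative):

        static __poll_t my_sock_poll(struct file *file, struct socket *sock,
                                     poll_table *wait)
        {
                struct sock *sk = sock->sk;
                __poll_t mask = 0;

                /* register on the wait queue first, then report readiness */
                sock_poll_wait(file, sk_sleep(sk), wait);
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }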
index 60c322531c498f1d43582be5b76f3a2f575ed5bc..e2188deb08dc3bb16e2a60808b274a4a092fd2ee 100644 (file)
@@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 9696ef96b719bf24625adea2a959deac1d2a975f..1a30e165eeb4fd1b884a0d5cd79c6823a5de9feb 100644 (file)
@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        __skb_pull(skb, nsh_len);
 
        skb_reset_mac_header(skb);
-       skb_reset_mac_len(skb);
+       skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
        skb->protocol = proto;
 
        features &= NETIF_F_SG;
index b891a91577f8030e55e973f4a4cf74f0c4637916..c038e021a591685cd9cca521c5b32259ea48bdbd 100644 (file)
@@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
        if (!meter)
                return ERR_PTR(-ENOMEM);
 
+       meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
        meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
        meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
        meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
@@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
        u32 meter_id;
        bool failed;
 
+       if (!a[OVS_METER_ATTR_ID]) {
+               return -ENODEV;
+       }
+
        meter = dp_meter_create(a);
        if (IS_ERR_OR_NULL(meter))
                return PTR_ERR(meter);
@@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       if (!a[OVS_METER_ATTR_ID]) {
-               err = -ENODEV;
-               goto exit_unlock;
-       }
-
        meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
 
        /* Cannot fail after this. */
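
Moving the OVS_METER_ATTR_ID check ahead of dp_meter_create() means a request without an ID is rejected before the meter is allocated, so the error path no longer leaks the new meter (and the ID can be stored at creation time). The generic validate-then-allocate shape, sketched with hypothetical names:

    static int cmd_set(struct nlattr **a)
    {
            struct meter *m;

            if (!a[ATTR_ID])                /* cheap checks first ... */
                    return -ENODEV;

            m = meter_create(a);            /* ... then allocate */
            if (IS_ERR(m))
                    return PTR_ERR(m);

            /* from here on, every failure path must free 'm' */
            commit_meter(m);                /* hypothetical: cannot fail */
            return 0;
    }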
index 50809748c1279ea17b7499acbec5699443804f64..e6445d8f3f57fee1c39de269724294b4455d8013 100644 (file)
@@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                if (po->stats.stats1.tp_drops)
                        status |= TP_STATUS_LOSING;
        }
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
@@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
-       if (do_vnet) {
-               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                           sizeof(struct virtio_net_hdr),
-                                           vio_le(), true, 0)) {
-                       spin_lock(&sk->sk_receive_queue.lock);
-                       goto drop_n_account;
-               }
-       }
-
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2880,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out_free;
        } else if (reserve) {
                skb_reserve(skb, -reserve);
+               if (len < reserve)
+                       skb_reset_network_header(skb);
        }
 
        /* Returns -EFAULT on error */
@@ -4078,11 +4078,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
        return 0;
 }
 
-static __poll_t packet_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t packet_poll(struct file *file, struct socket *sock,
+                               poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (po->rx_ring.pg_vec) {
@@ -4225,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (req->tp_block_nr) {
+               unsigned int min_frame_size;
+
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
@@ -4247,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
                        goto out;
+               min_frame_size = po->tp_hdrlen + po->tp_reserve;
                if (po->tp_version >= TPACKET_V3 &&
-                   req->tp_block_size <=
-                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+                   req->tp_block_size <
+                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
                        goto out;
-               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-                                       po->tp_reserve))
+               if (unlikely(req->tp_frame_size < min_frame_size))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;
@@ -4424,7 +4427,7 @@ static const struct proto_ops packet_ops_spkt = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -4445,7 +4448,7 @@ static const struct proto_ops packet_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
-       .poll_mask =    packet_poll_mask,
+       .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
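
The tpacket_rcv() rework above moves the one step that can fail, writing the virtio_net_hdr into the ring slot, to before the statistics are committed, and keeps it under the receive-queue lock that the drop path expects to hold. A stripped-down sketch of that ordering; names are illustrative:

    static int rcv_one(struct ring_slot *slot, struct ring_stats *stats,
                       spinlock_t *lock)
    {
            spin_lock(lock);
            if (fill_vnet_hdr(slot)) {      /* hypothetical fallible step */
                    spin_unlock(lock);      /* single, early drop path */
                    return -EINVAL;
            }
            stats->packets++;               /* commit; nothing fails past here */
            spin_unlock(lock);
            return 0;
    }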
index c295c4e20f012f31c1b443c5f859969caf412cec..30187990257fdb07a57c03707d6e1af0740b42f0 100644 (file)
@@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
        return sizeof(struct sockaddr_pn);
 }
 
-static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+                                       poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        __poll_t mask = 0;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = pn_socket_poll_mask,
+       .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
index 1b5025ea5b0426272145b56fa42e21d908612243..86e1e37eb4e8a68beeecd3bfeeb597951259ea81 100644 (file)
@@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        hdr->type = cpu_to_le32(type);
        hdr->src_node_id = cpu_to_le32(from->sq_node);
        hdr->src_port_id = cpu_to_le32(from->sq_port);
-       hdr->dst_node_id = cpu_to_le32(to->sq_node);
-       hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       if (to->sq_port == QRTR_PORT_CTRL) {
+               hdr->dst_node_id = cpu_to_le32(node->nid);
+               hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+       } else {
+               hdr->dst_node_id = cpu_to_le32(to->sq_node);
+               hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       }
 
        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = 0;
@@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        node = NULL;
        if (addr->sq_node == QRTR_NODE_BCAST) {
                enqueue_fn = qrtr_bcast_enqueue;
+               if (addr->sq_port != QRTR_PORT_CTRL) {
+                       release_sock(sk);
+                       return -ENOTCONN;
+               }
        } else if (addr->sq_node == ipc->us.sq_node) {
                enqueue_fn = qrtr_local_enqueue;
        } else {
@@ -1023,7 +1032,7 @@ static const struct proto_ops qrtr_proto_ops = {
        .recvmsg        = qrtr_recvmsg,
        .getname        = qrtr_getname,
        .ioctl          = qrtr_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
index abef75da89a7450092aefc46ed902e6602fba7a6..cfb05953b0e57afad21fd708f0df42d63c77cd55 100644 (file)
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
 
 int rds_conn_init(void)
 {
+       int ret;
+
+       ret = rds_loop_net_init(); /* register pernet callback */
+       if (ret)
+               return ret;
+
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
-       if (!rds_conn_slab)
+       if (!rds_conn_slab) {
+               rds_loop_net_exit();
                return -ENOMEM;
+       }
 
        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
 
 void rds_conn_exit(void)
 {
+       rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();
 
        WARN_ON(!hlist_empty(rds_conn_hash));
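
rds_conn_init() now registers a pernet callback first and unwinds it on any later failure, pairing it with the unregister in rds_conn_exit(). A sketch of that init/exit pairing, hypothetical names throughout:

    static void __net_exit my_exit_net(struct net *net)
    {
            /* tear down every connection belonging to this namespace */
    }

    static struct pernet_operations my_net_ops = {
            .exit = my_exit_net,
    };

    int my_init(void)
    {
            int ret = register_pernet_device(&my_net_ops);

            if (ret)
                    return ret;

            ret = allocate_caches();        /* hypothetical later step */
            if (ret)
                    unregister_pernet_device(&my_net_ops); /* roll back */
            return ret;
    }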
index 48332a6ed7383c51def7402dcfef1e581fa677f7..d152e48ea371a5c6c8a565f3d6745625d165e5b3 100644 (file)
@@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
        struct rds_ib_frmr *frmr;
        int ret;
 
+       if (!ic) {
+               /* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
        do {
                if (ibmr)
                        rds_ib_free_frmr(ibmr, true);
index 0ea4ab017a8cc3f807931e1194cddb5048a82956..655f01d427fe5c7899f916987dd88022832c8bdc 100644 (file)
@@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                        struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-                   struct rds_sock *rs, u32 *key_ret);
+                   struct rds_sock *rs, u32 *key_ret,
+                   struct rds_connection *conn);
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
index e678699268a253d1e65aea63e64ac30b44ac6dcd..2e49a40a5e113cef44543f112c4911a39501fc7e 100644 (file)
@@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void)
 }
 
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-                   struct rds_sock *rs, u32 *key_ret)
+                   struct rds_sock *rs, u32 *key_ret,
+                   struct rds_connection *conn)
 {
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
-       struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
+       struct rds_ib_connection *ic = NULL;
        int ret;
 
        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                goto out;
        }
 
+       if (conn)
+               ic = conn->c_transport_data;
+
        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
@@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
-       if (ibmr)
-               rds_ibdev = NULL;
-
- out:
-       if (!ibmr)
+       if (IS_ERR(ibmr)) {
+               ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
+       } else {
+               return ibmr;
+       }
 
+ out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
 
-       return ibmr;
+       return ERR_PTR(ret);
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
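
rds_ib_get_mr() now reports failure through the returned pointer itself rather than a NULL plus a side-channel errno. A minimal sketch of the ERR_PTR()/IS_ERR() convention it adopts; lookup_thing(), find_thing() and put_thing() are hypothetical:

    static struct thing *lookup_thing(int key)
    {
            struct thing *t = find_thing(key);

            if (!t)
                    return ERR_PTR(-ENODEV);  /* errno encoded in pointer */
            return t;
    }

    static int use_thing(void)
    {
            struct thing *t = lookup_thing(42);

            if (IS_ERR(t))
                    return PTR_ERR(t);        /* decode back to an int */

            put_thing(t);
            return 0;
    }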
index dac6218a460ed4d4a5b7b03ad4f6056a68784a16..feea1f96ee2ad582dce8f815442da1bbf6e0508a 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds_single_path.h"
 #include "rds.h"
 
 static DEFINE_SPINLOCK(loop_conns_lock);
 static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+       atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+       return atomic_read(&rds_loop_unloading) != 0;
+}
 
 /*
  * This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
        struct rds_loop_connection *lc, *_lc;
        LIST_HEAD(tmp_list);
 
+       rds_loop_set_unloading();
+       synchronize_rcu();
        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&loop_conns_lock);
        list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
        }
 }
 
+static void rds_loop_kill_conns(struct net *net)
+{
+       struct rds_loop_connection *lc, *_lc;
+       LIST_HEAD(tmp_list);
+
+       spin_lock_irq(&loop_conns_lock);
+       list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
+               struct net *c_net = read_pnet(&lc->conn->c_net);
+
+               if (net != c_net)
+                       continue;
+               list_move_tail(&lc->loop_node, &tmp_list);
+       }
+       spin_unlock_irq(&loop_conns_lock);
+
+       list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+               WARN_ON(lc->conn->c_passive);
+               rds_conn_destroy(lc->conn);
+       }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+       rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+       .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+       return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+       unregister_pernet_device(&rds_loop_net_ops);
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
        .inc_free               = rds_loop_inc_free,
        .t_name                 = "loopback",
        .t_type                 = RDS_TRANS_LOOP,
+       .t_unloading            = rds_loop_is_unloading,
 };
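
The loopback transport gains the same unload guard other RDS transports use: an atomic flag raised before teardown, published to readers with synchronize_rcu(), and queried through the ->t_unloading hook. Sketched with hypothetical names:

    static atomic_t unloading = ATOMIC_INIT(0);

    static bool is_unloading(void)
    {
            return atomic_read(&unloading) != 0;
    }

    static void my_teardown(void)
    {
            atomic_set(&unloading, 1);
            synchronize_rcu();              /* all readers now see the flag */
            destroy_all_connections();      /* hypothetical */
    }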
index 469fa4b2da4f38b5fb62358507cb9d9ca62aa825..bbc8cdd030df3137ea250578cb3d429a86fd68f2 100644 (file)
@@ -5,6 +5,8 @@
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
 void rds_loop_exit(void);
 
 #endif
index 634cfcb7bba6833bde376706947c99f1cb103199..80920e47f2c79eb3ce0f95f6acc70e2126af1441 100644 (file)
@@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
 }
 
 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
-                               u64 *cookie_ret, struct rds_mr **mr_ret)
+                         u64 *cookie_ret, struct rds_mr **mr_ret,
+                         struct rds_conn_path *cp)
 {
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
@@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
-                                                &mr->r_key);
+                                                &mr->r_key,
+                                                cp ? cp->cp_conn : NULL);
 
        if (IS_ERR(trans_private)) {
                for (i = 0 ; i < nents; i++)
@@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;
 
-       return __rds_rdma_map(rs, &args, NULL, NULL);
+       return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
 }
 
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
@@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
        new_args.cookie_addr = args.cookie_addr;
        new_args.flags = args.flags;
 
-       return __rds_rdma_map(rs, &new_args, NULL, NULL);
+       return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
 }
 
 /*
@@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
            rm->m_rdma_cookie != 0)
                return -EINVAL;
 
-       return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+       return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
+                             &rm->rdma.op_rdma_mr, rm->m_conn_path);
 }
 
 /*
index f2272fb8cd456e7b5bb2495d5d02691803c3769f..60b3b787fbdb321ca3a280314c18443890ed78c3 100644 (file)
@@ -464,6 +464,8 @@ struct rds_message {
                        struct scatterlist      *op_sg;
                } data;
        };
+
+       struct rds_conn_path *m_conn_path;
 };
 
 /*
@@ -544,7 +546,8 @@ struct rds_transport {
                                        unsigned int avail);
        void (*exit)(void);
        void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
-                       struct rds_sock *rs, u32 *key_ret);
+                       struct rds_sock *rs, u32 *key_ret,
+                       struct rds_connection *conn);
        void (*sync_mr)(void *trans_private, int direction);
        void (*free_mr)(void *trans_private, int invalidate);
        void (*flush_mrs)(void);
index 94c7f74909be32f36344ab0b82cfad06c16b041f..59f17a2335f44c00b445d8571a19eb393b803c68 100644 (file)
@@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                rs->rs_conn = conn;
        }
 
+       if (conn->c_trans->t_mp_capable)
+               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+       else
+               cpath = &conn->c_path[0];
+
+       rm->m_conn_path = cpath;
+
        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret) {
@@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                goto out;
        }
 
-       if (conn->c_trans->t_mp_capable)
-               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
-       else
-               cpath = &conn->c_path[0];
-
        if (rds_destroy_pending(conn)) {
                ret = -EAGAIN;
                goto out;
index ebe42e7eb45697030367c4baba455b50c973c409..d00a0ef39a56b38cae4114654c44a3bddccb35ba 100644 (file)
@@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       rose_accept,
        .getname        =       rose_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       rose_ioctl,
        .listen         =       rose_listen,
        .shutdown       =       sock_no_shutdown,
index 3b1ac93efee22248ab01c3c8a610e874e99356b5..2b463047dd7ba93267feb584e1ffda280449a0b3 100644 (file)
@@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
 /*
  * permit an RxRPC socket to be polled
  */
-static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
@@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = rxrpc_poll_mask,
+       .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = rxrpc_shutdown,
index 5fb7d3254d9e290106dda6566ed393eb6a2c3696..707630ab47133406f76cdd8bf76fe792bf2fa41f 100644 (file)
@@ -104,9 +104,9 @@ struct rxrpc_net {
 
 #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
        u8                      peer_keepalive_cursor;
-       ktime_t                 peer_keepalive_base;
-       struct hlist_head       peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
-       struct hlist_head       peer_keepalive_new;
+       time64_t                peer_keepalive_base;
+       struct list_head        peer_keepalive[32];
+       struct list_head        peer_keepalive_new;
        struct timer_list       peer_keepalive_timer;
        struct work_struct      peer_keepalive_work;
 };
@@ -295,7 +295,7 @@ struct rxrpc_peer {
        struct hlist_head       error_targets;  /* targets for net error distribution */
        struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
-       struct hlist_node       keepalive_link; /* Link in net->peer_keepalive[] */
+       struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
        seqlock_t               service_conn_lock;
        spinlock_t              lock;           /* access lock */
index a9a9be5519b9abfbc6d627279b4e64a35e4b05bd..9d1e298b784c8b595626ec0b8f5af0f14e7e03a4 100644 (file)
@@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                while (*pp) {
                        parent = *pp;
                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-                       if (user_call_ID < call->user_call_ID)
+                       if (user_call_ID < xcall->user_call_ID)
                                pp = &(*pp)->rb_left;
-                       else if (user_call_ID > call->user_call_ID)
+                       else if (user_call_ID > xcall->user_call_ID)
                                pp = &(*pp)->rb_right;
                        else
                                goto id_in_use;
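
The fix above makes the rb-tree descent compare the search key against xcall, the node currently under examination, rather than against the call being inserted. The corrected walk, as a generic sketch with a hypothetical struct item:

    struct item {
            struct rb_node  node;
            u32             key;
    };

    static int item_insert(struct rb_root *root, struct item *new)
    {
            struct rb_node **pp = &root->rb_node, *parent = NULL;

            while (*pp) {
                    struct item *cur;

                    parent = *pp;
                    cur = rb_entry(parent, struct item, node);
                    if (new->key < cur->key)        /* compare against *cur*, */
                            pp = &(*pp)->rb_left;   /* not the item in hand  */
                    else if (new->key > cur->key)
                            pp = &(*pp)->rb_right;
                    else
                            return -EEXIST;
            }

            rb_link_node(&new->node, parent, pp);
            rb_insert_color(&new->node, root);
            return 0;
    }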
index f6734d8cb01a711317854a17c981b2506625846b..9486293fef5c6f98c96397fc90eb14eecf332196 100644 (file)
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
        const void *here = __builtin_return_address(0);
-       int n = __atomic_add_unless(&call->usage, 1, 0);
+       int n = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
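
__atomic_add_unless() was renamed to atomic_fetch_add_unless() in the atomics rework this tree is based on; both return the old value, which is what makes the "take a reference only if one is still held" idiom work. A sketch with a hypothetical struct obj:

    struct obj {
            atomic_t usage;
    };

    static bool obj_get_maybe(struct obj *o)
    {
            /* old value 0 means the last ref is gone: do not resurrect */
            return atomic_fetch_add_unless(&o->usage, 1, 0) != 0;
    }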
index 8229a52c2acd79f69883e27e966fa4df43ca93c3..3fde001fcc392d9db372c4fa60a80ddf60e55d9a 100644 (file)
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        _leave(" = 0");
        return 0;
index 4c77a78a252af9a19d4801d3904847475a4d5341..77440a356b14ae60e875fcd94a2613227fd899cf 100644 (file)
@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
 bool rxrpc_queue_conn(struct rxrpc_connection *conn)
 {
        const void *here = __builtin_return_address(0);
-       int n = __atomic_add_unless(&conn->usage, 1, 0);
+       int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
        const void *here = __builtin_return_address(0);
 
        if (conn) {
-               int n = __atomic_add_unless(&conn->usage, 1, 0);
+               int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
                if (n > 0)
                        trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
                else
index b493e6b6274043e07b15c5a0481f0e92a4478ea4..777c3ed4cfc03d3923e052d95597926a1893a163 100644 (file)
@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
        const void *here = __builtin_return_address(0);
 
        if (local) {
-               int n = __atomic_add_unless(&local->usage, 1, 0);
+               int n = atomic_fetch_add_unless(&local->usage, 1, 0);
                if (n > 0)
                        trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
                else
index 5d6a773db973180c646e61ddea9598dca6ff453a..417d80867c4f4a013465b3f9c22be04e49082b2f 100644 (file)
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
        hash_init(rxnet->peer_hash);
        spin_lock_init(&rxnet->peer_hash_lock);
        for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
-               INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
-       INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+               INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+       INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
        timer_setup(&rxnet->peer_keepalive_timer,
                    rxrpc_peer_keepalive_timeout, 0);
        INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
-       rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+       rxnet->peer_keepalive_base = ktime_get_seconds();
 
        ret = -ENOMEM;
        rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
index f03de1c59ba37678f36f3a5c0778f3f3f9274757..4774c8f5634d95c647d1fbdc938dece00bc38ea2 100644 (file)
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        now = ktime_get_real();
        if (ping)
                call->ping_time = now;
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
         *     message and update the peer record
         */
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        up_read(&conn->params.local->defrag_sem);
        if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IP_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
                trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
                                    rxrpc_tx_fail_version_keepalive);
 
-       peer->last_tx_at = ktime_get_real();
+       peer->last_tx_at = ktime_get_seconds();
        _leave("");
 }
index 0ed8b651cec293e121e40cf05282bddc8c3f1171..4f9da2f51c694c3f93d3883476057377664b80e7 100644 (file)
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
  */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+                                         struct list_head *collector,
+                                         time64_t base,
+                                         u8 cursor)
 {
-       struct rxrpc_net *rxnet =
-               container_of(work, struct rxrpc_net, peer_keepalive_work);
        struct rxrpc_peer *peer;
-       unsigned long delay;
-       ktime_t base, now = ktime_get_real();
-       s64 diff;
-       u8 cursor, slot;
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t keepalive_at;
+       int slot;
 
-       base = rxnet->peer_keepalive_base;
-       cursor = rxnet->peer_keepalive_cursor;
+       spin_lock_bh(&rxnet->peer_hash_lock);
 
-       _enter("%u,%lld", cursor, ktime_sub(now, base));
+       while (!list_empty(collector)) {
+               peer = list_entry(collector->next,
+                                 struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-       diff = ktime_to_ns(ktime_sub(now, base));
-       if (diff < 0)
-               goto resched;
+               list_del_init(&peer->keepalive_link);
+               if (!rxrpc_get_peer_maybe(peer))
+                       continue;
 
-       _debug("at %u", cursor);
-       spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-       if (!rxnet->live) {
                spin_unlock_bh(&rxnet->peer_hash_lock);
-               goto out;
-       }
 
-       /* Everything in the bucket at the cursor is processed this second; the
-        * bucket at cursor + 1 goes now + 1s and so on...
-        */
-       if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-               if (hlist_empty(&rxnet->peer_keepalive_new)) {
-                       spin_unlock_bh(&rxnet->peer_hash_lock);
-                       goto emptied_bucket;
+               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+               slot = keepalive_at - base;
+               _debug("%02x peer %u t=%d {%pISp}",
+                      cursor, peer->debug_id, slot, &peer->srx.transport);
+
+               if (keepalive_at <= base ||
+                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                       rxrpc_send_keepalive(peer);
+                       slot = RXRPC_KEEPALIVE_TIME;
                }
 
-               hlist_move_list(&rxnet->peer_keepalive_new,
-                               &rxnet->peer_keepalive[cursor]);
+               /* A transmission to this peer occurred since last we examined
+                * it so put it into the appropriate future bucket.
+                */
+               slot += cursor;
+               slot &= mask;
+               spin_lock_bh(&rxnet->peer_hash_lock);
+               list_add_tail(&peer->keepalive_link,
+                             &rxnet->peer_keepalive[slot & mask]);
+               rxrpc_put_peer(peer);
        }
 
-       peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-                          struct rxrpc_peer, keepalive_link);
-       hlist_del_init(&peer->keepalive_link);
-       if (!rxrpc_get_peer_maybe(peer))
-               goto next_peer;
-
        spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-       _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+       struct rxrpc_net *rxnet =
+               container_of(work, struct rxrpc_net, peer_keepalive_work);
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t base, now, delay;
+       u8 cursor, stop;
+       LIST_HEAD(collector);
 
-recalc:
-       diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-       if (diff < -30 || diff > 30)
-               goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-       diff += RXRPC_KEEPALIVE_TIME - 1;
-       if (diff < 0)
-               goto send;
+       now = ktime_get_seconds();
+       base = rxnet->peer_keepalive_base;
+       cursor = rxnet->peer_keepalive_cursor;
+       _enter("%lld,%u", base - now, cursor);
 
-       slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-       if (slot == 0)
-               goto send;
+       if (!rxnet->live)
+               return;
 
-       /* A transmission to this peer occurred since last we examined it so
-        * put it into the appropriate future bucket.
+       /* Remove to a temporary list all the peers that are currently lodged
+        * in expired buckets plus all new peers.
+        *
+        * Everything in the bucket at the cursor is processed this
+        * second; the bucket at cursor + 1 goes at now + 1s and so
+        * on...
         */
-       slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
        spin_lock_bh(&rxnet->peer_hash_lock);
-       hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-       rxrpc_put_peer(peer);
-       goto next_peer;
-
-send:
-       rxrpc_send_keepalive(peer);
-       now = ktime_get_real();
-       goto recalc;
+       list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+       stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+       while (base <= now && (s8)(cursor - stop) < 0) {
+               list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+                                     &collector);
+               base++;
+               cursor++;
+       }
 
-emptied_bucket:
-       cursor++;
-       if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-               cursor = 0;
-       base = ktime_add_ns(base, NSEC_PER_SEC);
-       goto next_bucket;
+       base = now;
+       spin_unlock_bh(&rxnet->peer_hash_lock);
 
-resched:
        rxnet->peer_keepalive_base = base;
        rxnet->peer_keepalive_cursor = cursor;
-       delay = nsecs_to_jiffies(-diff) + 1;
-       timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+       rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+       ASSERT(list_empty(&collector));
+
+       /* Schedule the timer for the next occupied timeslot. */
+       cursor = rxnet->peer_keepalive_cursor;
+       stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+       for (; (s8)(cursor - stop) < 0; cursor++) {
+               if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+                       break;
+               base++;
+       }
+
+       now = ktime_get_seconds();
+       delay = base - now;
+       if (delay < 1)
+               delay = 1;
+       delay *= HZ;
+       if (rxnet->live)
+               timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
        _leave("");
 }
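
The rewritten keepalive machinery is effectively a small timer wheel: a power-of-two array of buckets, one per second, with a cursor that advances as time passes; expired buckets are spliced onto a collector list and each peer is re-inserted at cursor + seconds-until-due. A stripped-down sketch of the rebucketing step (constants and names are illustrative; the real worker also sends the keepalive ping when a peer is overdue):

    #define WHEEL_SIZE      32      /* power of two, like peer_keepalive[32] */
    #define KEEPALIVE_TIME  20      /* seconds, mirrors RXRPC_KEEPALIVE_TIME */

    struct peer {                   /* hypothetical, pared down */
            struct list_head link;
            time64_t         last_tx_at;
    };

    static void rebucket(struct list_head *wheel, struct peer *p,
                         time64_t base, u8 cursor)
    {
            const u8 mask = WHEEL_SIZE - 1;
            time64_t due = p->last_tx_at + KEEPALIVE_TIME;
            int slot = due - base;          /* seconds until the ping is due */

            if (slot <= 0 || slot > KEEPALIVE_TIME)
                    slot = KEEPALIVE_TIME;  /* overdue: ping, then rearm fully */

            list_add_tail(&p->link, &wheel[(cursor + slot) & mask]);
    }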
index 1b7e8107b3ae8a144bc72d01f6e1277368b3cbad..1dc7648e3eff34f25ceea7b0edbd74b5f8cd02b3 100644 (file)
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
        if (!peer) {
                peer = prealloc;
                hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        }
 
        spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                if (!peer) {
                        hash_add_rcu(rxnet->peer_hash,
                                     &candidate->hash_link, hash_key);
-                       hlist_add_head(&candidate->keepalive_link,
-                                      &rxnet->peer_keepalive_new);
+                       list_add_tail(&candidate->keepalive_link,
+                                     &rxnet->peer_keepalive_new);
                }
 
                spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        const void *here = __builtin_return_address(0);
 
        if (peer) {
-               int n = __atomic_add_unless(&peer->usage, 1, 0);
+               int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
                if (n > 0)
                        trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
                else
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
        spin_lock_bh(&rxnet->peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
-       hlist_del_init(&peer->keepalive_link);
+       list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
        kfree_rcu(peer, rcu);
index 278ac0807a60a8664bbb825ccd06737a595d8631..47cb019c521a8ca01ead3d47e25a0474ae6282b1 100644 (file)
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
index 526a8e491626efb65fcda10d875e6f55ca2168e8..6e7124e57918e98433f0d3302565ae4e0b9eaaf4 100644 (file)
@@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        }
        params_old = rtnl_dereference(p->params);
 
-       params_new->action = parm->action;
+       p->tcf_action = parm->action;
        params_new->update_flags = parm->update_flags;
        rcu_assign_pointer(p->params, params_new);
        if (params_old)
@@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&p->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
 
-       action = params->action;
+       action = READ_ONCE(p->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
                goto drop_stats;
 
@@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                .index   = p->tcf_index,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
+               .action  = p->tcf_action,
        };
        struct tcf_t t;
 
        params = rtnl_dereference(p->params);
-       opt.action = params->action;
        opt.update_flags = params->update_flags;
 
        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
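
act_csum (and act_tunnel_key further down) stop keeping ->action inside the RCU-swapped parameter block and instead read the pinned tcf_action with READ_ONCE() on the fast path, so a control-plane update cannot be torn against a concurrent reader. A sketch of the resulting access pattern with hypothetical types:

    struct my_params {                      /* RCU-swapped config block */
            u32 update_flags;
    };

    struct my_act {                         /* hypothetical, pared down */
            int                      action; /* updated by control plane */
            struct my_params __rcu  *params;
    };

    static int my_act_run(struct sk_buff *skb, struct my_act *p)
    {
            struct my_params *params;
            int action;

            action = READ_ONCE(p->action);          /* no torn reads */
            params = rcu_dereference_bh(p->params); /* config snapshot */
            if (params)
                    apply(skb, params->update_flags); /* hypothetical */
            return action;
    }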
index 8527cfdc446d9bb82e8fa9fe1364dc13249b1e03..20d7d36b2fc9b9d3af256f48795da6e387f7f781 100644 (file)
@@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
        spin_unlock_bh(&ife->tcf_lock);
 
        p = rcu_dereference_protected(ife->params, 1);
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /* under ife->tcf_lock for existing action */
@@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       ife->tcf_action = parm->action;
-
        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
@@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               tcf_idr_release(*a, bind);
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +564,7 @@ metadata_parse_err:
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -576,6 +573,7 @@ metadata_parse_err:
                }
        }
 
+       ife->tcf_action = parm->action;
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
 
index 626dac81a48a6b2ab97e9d0c786b08989f693288..9bc6c2ae98a56ceb2a4719be91a1937b5441a58d 100644 (file)
@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
 
        tcf_lastuse_update(&t->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
-       action = params->action;
+       action = READ_ONCE(t->tcf_action);
 
        switch (params->tcft_action) {
        case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        params_old = rtnl_dereference(t->params);
 
-       params_new->action = parm->action;
+       t->tcf_action = parm->action;
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                .index    = t->tcf_index,
                .refcnt   = t->tcf_refcnt - ref,
                .bindcnt  = t->tcf_bindcnt - bind,
+               .action   = t->tcf_action,
        };
        struct tcf_t tm;
 
        params = rtnl_dereference(t->params);
 
        opt.t_action = params->tcft_action;
-       opt.action = params->action;
 
        if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
index cdc3c87c53e62d4db4bb18fa5f59d7889b9866cb..f74513a7c7a8ed179bfbeabb17fe60dd2f9b6eb2 100644 (file)
@@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
        for (tp = rtnl_dereference(chain->filter_chain);
             tp; tp = rtnl_dereference(tp->next))
                tfilter_notify(net, oskb, n, tp, block,
-                              q, parent, 0, event, false);
+                              q, parent, NULL, event, false);
 }
 
 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
                        memset(&cb->args[1], 0,
                               sizeof(cb->args) - sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
-                       if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
+                       if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
index 2b5be42a9f1ca8e63952158ed2b9339e1a308d0b..9e8b26a80fb3ea9e57b6b22d259eaefe171eca09 100644 (file)
@@ -66,7 +66,7 @@ struct fl_flow_mask {
        struct rhashtable_params filter_ht_params;
        struct flow_dissector dissector;
        struct list_head filters;
-       struct rcu_head rcu;
+       struct rcu_work rwork;
        struct list_head list;
 };
 
@@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp)
        return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+       rhashtable_destroy(&mask->ht);
+       kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+       struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                struct fl_flow_mask, rwork);
+
+       fl_mask_free(mask);
+}
+
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                        bool async)
 {
@@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                return false;
 
        rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
-       rhashtable_destroy(&mask->ht);
        list_del_rcu(&mask->list);
        if (async)
-               kfree_rcu(mask, rcu);
+               tcf_queue_work(&mask->rwork, fl_mask_free_work);
        else
-               kfree(mask);
+               fl_mask_free(mask);
 
        return true;
 }
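
kfree_rcu() is no longer enough here because the mask destructor calls rhashtable_destroy(), which may sleep and so cannot run from RCU callback context; tcf_queue_work() defers the free to a workqueue after the grace period. A generic sketch of the underlying rcu_work pattern:

    struct foo {
            struct rcu_work rwork;
            /* payload ... */
    };

    static void foo_free_work(struct work_struct *work)
    {
            struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

            might_sleep();                  /* safe: runs from a workqueue */
            kfree(f);
    }

    static void foo_put(struct foo *f)
    {
            /* grace period elapses first, then the work item may sleep */
            INIT_RCU_WORK(&f->rwork, foo_free_work);
            queue_rcu_work(system_wq, &f->rwork);
    }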
index c98a61e980baa68931f7e974582eb1c43ed60cf5..9c4c2bb547d7ea1da26e956a77b23592d467365b 100644 (file)
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        qdisc_drop(skb, sch, to_free);
-       return NET_XMIT_SUCCESS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
index cd2e0e342fb6235840860ff15ceaeb73eddaa492..6c0a9d5dbf9441d00a832915e23d6b82bd8ab313 100644 (file)
@@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = fq_codel_change(sch, opt, extack);
+               err = fq_codel_change(sch, opt, extack);
                if (err)
-                       return err;
+                       goto init_failure;
        }
 
        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
-               return err;
+               goto init_failure;
 
        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
-               if (!q->flows)
-                       return -ENOMEM;
+               if (!q->flows) {
+                       err = -ENOMEM;
+                       goto init_failure;
+               }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
-               if (!q->backlogs)
-                       return -ENOMEM;
+               if (!q->backlogs) {
+                       err = -ENOMEM;
+                       goto alloc_failure;
+               }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;
 
@@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
+
+alloc_failure:
+       kvfree(q->flows);
+       q->flows = NULL;
+init_failure:
+       q->flows_cnt = 0;
+       return err;
 }
 
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
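
fq_codel_init() now unwinds through ordered labels instead of returning directly, so a failure after the first kvcalloc() releases exactly what was already allocated and leaves flows_cnt consistent. The shape, sketched with hypothetical names:

    struct my_priv {            /* hypothetical, pared down */
            struct flow *flows;
            u32         *backlogs;
            u32          n;
    };

    static int my_init(struct my_priv *q)
    {
            int err;

            q->flows = kvcalloc(q->n, sizeof(*q->flows), GFP_KERNEL);
            if (!q->flows) {
                    err = -ENOMEM;
                    goto init_failure;
            }
            q->backlogs = kvcalloc(q->n, sizeof(u32), GFP_KERNEL);
            if (!q->backlogs) {
                    err = -ENOMEM;
                    goto alloc_failure;     /* undo the first allocation */
            }
            return 0;

    alloc_failure:
            kvfree(q->flows);
            q->flows = NULL;
    init_failure:
            q->n = 0;               /* leave the object in a sane state */
            return err;
    }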
index 3ae9877ea2057d0ba517c84d38f6ba6a79ff6ef8..3278a76f6861576ba7e42cf9f91a62f96443cb3a 100644 (file)
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
-       WARN_ON(next_time == 0);
-       qdisc_watchdog_schedule(&q->watchdog, next_time);
+       if (next_time)
+               qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
index 79daa98208c391c780440144d69bc7be875c3476..bfb9f812e2ef9fa605b08dc1f534781573c3abf8 100644 (file)
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg->can_delay = 0;
-               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+               if (msg_len > first_len)
+                       SCTP_INC_STATS(sock_net(asoc->base.sk),
+                                      SCTP_MIB_FRAGUSRMSGS);
        } else {
                /* Which may be the only one... */
                first_len = msg_len;
index 7339918a805d93db8a94fed627f99962e07e3267..0cd2e764f47ff0874438301324de25e4bf33dd95 100644 (file)
@@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = sctp_getname,
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,
index 5dffbc4930086699cefa10f704de5fd2068169c8..67f73d3a1356b93d3896b6985a65e70615902b18 100644 (file)
@@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,      /* Semantics are different.  */
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,     /* Looks harmless.  */
index d20f7addee19ecb794fa85f9ed73e8b40784a095..ce620e878538be99e1f79784582d0da48ba292ea 100644 (file)
@@ -7717,12 +7717,14 @@ out:
  * here, again, by modeling the current TCP/UDP code.  We don't have
  * a good way to test with it yet.
  */
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct sctp_sock *sp = sctp_sk(sk);
        __poll_t mask;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        sock_rps_record_flow(sk);
 
        /* A TCP-style listening socket becomes readable when the accept queue
index 445b7ef61677cfdb1172486e432b9bd6a0f853d5..12cac85da994356ef24cf264e1fb8451f2e303dc 100644 (file)
@@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 
        if (dst) {
                /* Re-fetch, as under layers may have a higher minimum size */
-               pmtu = SCTP_TRUNC4(dst_mtu(dst));
+               pmtu = sctp_dst_mtu(dst);
                change = t->pathmtu != pmtu;
        }
        t->pathmtu = pmtu;
index da7f02edcd374c44437e34a2705f410317ea536d..e7de5f282722d5dd41b77affc6bea0a490e3969d 100644 (file)
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending);  /* serialize link group
                                                 */
 
 static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
 {
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock)
                goto out;
 
        smc = smc_sk(sk);
+
+       /* cleanup for a dangling non-blocking connect */
+       flush_work(&smc->connect_work);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
@@ -140,7 +147,8 @@ static int smc_release(struct socket *sock)
                smc->clcsock = NULL;
        }
        if (smc->use_fallback) {
-               sock_put(sk); /* passive closing */
+               if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+                       sock_put(sk); /* passive closing */
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
        }
@@ -186,6 +194,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_protocol = protocol;
        smc = smc_sk(sk);
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
@@ -409,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
 {
        int rc;
 
-       if (reason_code < 0) /* error, fallback is not possible */
+       if (reason_code < 0) { /* error, fallback is not possible */
+               if (smc->sk.sk_state == SMC_INIT)
+                       sock_put(&smc->sk); /* passive closing */
                return reason_code;
+       }
        if (reason_code != SMC_CLC_DECL_REPLY) {
                rc = smc_clc_send_decline(smc, reason_code);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (smc->sk.sk_state == SMC_INIT)
+                               sock_put(&smc->sk); /* passive closing */
                        return rc;
+               }
        }
        return smc_connect_fallback(smc);
 }
@@ -427,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
                smc_lgr_forget(smc->conn.lgr);
        mutex_unlock(&smc_create_lgr_pending);
        smc_conn_free(&smc->conn);
-       if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
-               sock_put(&smc->sk); /* passive closing */
        return reason_code;
 }
 
@@ -576,6 +589,35 @@ static int __smc_connect(struct smc_sock *smc)
        return 0;
 }
 
+static void smc_connect_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(work, struct smc_sock,
+                                           connect_work);
+       int rc;
+
+       lock_sock(&smc->sk);
+       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+                           smc->connect_info->alen, smc->connect_info->flags);
+       if (smc->clcsock->sk->sk_err) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               goto out;
+       }
+       if (rc < 0) {
+               smc->sk.sk_err = -rc;
+               goto out;
+       }
+
+       rc = __smc_connect(smc);
+       if (rc < 0)
+               smc->sk.sk_err = -rc;
+
+out:
+       smc->sk.sk_state_change(&smc->sk);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+       release_sock(&smc->sk);
+}
+
 static int smc_connect(struct socket *sock, struct sockaddr *addr,
                       int alen, int flags)
 {
@@ -605,15 +647,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
-       rc = kernel_connect(smc->clcsock, addr, alen, flags);
-       if (rc)
-               goto out;
+       if (flags & O_NONBLOCK) {
+               if (smc->connect_info) {
+                       rc = -EALREADY;
+                       goto out;
+               }
+               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+               if (!smc->connect_info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               smc->connect_info->alen = alen;
+               smc->connect_info->flags = flags ^ O_NONBLOCK;
+               memcpy(&smc->connect_info->addr, addr, alen);
+               schedule_work(&smc->connect_work);
+               rc = -EINPROGRESS;
+       } else {
+               rc = kernel_connect(smc->clcsock, addr, alen, flags);
+               if (rc)
+                       goto out;
 
-       rc = __smc_connect(smc);
-       if (rc < 0)
-               goto out;
-       else
-               rc = 0; /* success cases including fallback */
+               rc = __smc_connect(smc);
+               if (rc < 0)
+                       goto out;
+               else
+                       rc = 0; /* success cases including fallback */
+       }
 
 out:
        release_sock(sk);
@@ -1063,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
                sock_hold(lsk); /* sock_put in smc_listen_work */
                INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                smc_copy_sock_settings_to_smc(new_smc);
+               new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+               new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
                sock_hold(&new_smc->sk); /* sock_put in passive closing */
                if (!schedule_work(&new_smc->smc_listen_work))
                        sock_put(&new_smc->sk);
@@ -1273,40 +1334,26 @@ static __poll_t smc_accept_poll(struct sock *parent)
        return mask;
 }
 
-static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t smc_poll(struct file *file, struct socket *sock,
+                            poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
        struct smc_sock *smc;
-       int rc;
 
        if (!sk)
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       sock_hold(sk);
-       lock_sock(sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
-               release_sock(sk);
-               mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
-               lock_sock(sk);
+               mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err) {
+               if (sk->sk_err)
                        mask |= EPOLLERR;
-               } else {
-                       /* if non-blocking connect finished ... */
-                       if (sk->sk_state == SMC_INIT &&
-                           mask & EPOLLOUT &&
-                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
-                               rc = __smc_connect(smc);
-                               if (rc < 0)
-                                       mask |= EPOLLERR;
-                               /* success cases including fallback */
-                               mask |= EPOLLOUT | EPOLLWRNORM;
-                       }
-               }
        } else {
+               if (sk->sk_state != SMC_CLOSED)
+                       sock_poll_wait(file, sk_sleep(sk), wait);
                if (sk->sk_err)
                        mask |= EPOLLERR;
                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1332,10 +1379,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
                }
                if (smc->conn.urg_state == SMC_URG_VALID)
                        mask |= EPOLLPRI;
-
        }
-       release_sock(sk);
-       sock_put(sk);
 
        return mask;
 }
@@ -1355,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
        lock_sock(sk);
 
        rc = -ENOTCONN;
-       if ((sk->sk_state != SMC_LISTEN) &&
-           (sk->sk_state != SMC_ACTIVE) &&
+       if ((sk->sk_state != SMC_ACTIVE) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
            (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1415,7 +1458,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       get_user(val, (int __user *)optval);
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
 
        lock_sock(sk);
        switch (optname) {
@@ -1478,15 +1522,22 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 
        smc = smc_sk(sock->sk);
        conn = &smc->conn;
+       lock_sock(&smc->sk);
        if (smc->use_fallback) {
-               if (!smc->clcsock)
+               if (!smc->clcsock) {
+                       release_sock(&smc->sk);
                        return -EBADF;
-               return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               }
+               answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               release_sock(&smc->sk);
+               return answ;
        }
        switch (cmd) {
        case SIOCINQ: /* same as FIONREAD */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1495,8 +1546,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQ:
                /* output queue size (not send + not acked) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1506,8 +1559,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQNSD:
                /* output queue size (not send only) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1515,8 +1570,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                        answ = smc_tx_prepared_sends(&smc->conn);
                break;
        case SIOCATMARK:
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED) {
                        answ = 0;
@@ -1532,8 +1589,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                }
                break;
        default:
+               release_sock(&smc->sk);
                return -ENOIOCTLCMD;
        }
+       release_sock(&smc->sk);
 
        return put_user(answ, (int __user *)arg);
 }
@@ -1619,7 +1678,7 @@ static const struct proto_ops smc_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = smc_accept,
        .getname        = smc_getname,
-       .poll_mask      = smc_poll_mask,
+       .poll           = smc_poll,
        .ioctl          = smc_ioctl,
        .listen         = smc_listen,
        .shutdown       = smc_shutdown,
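
The O_NONBLOCK branch added to smc_connect() above keeps the standard BSD
contract for asynchronous connects: return -EINPROGRESS, finish the handshake
in the background (here, in smc_connect_work()), and report the outcome via
sk_err and a state change. A minimal userspace sketch of how a caller consumes
that contract; the loopback address, port and timeout are arbitrary example
values:

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in sa = { .sin_family = AF_INET,
                                  .sin_port = htons(12345) };
        struct pollfd pfd;
        socklen_t elen;
        int fd, err;

        fd = socket(AF_INET, SOCK_STREAM, 0);
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
        inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);

        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
            errno != EINPROGRESS) {
                perror("connect");
                return 1;
        }
        pfd.fd = fd;
        pfd.events = POLLOUT;
        poll(&pfd, 1, 5000);                    /* handshake runs async */
        err = 0;
        elen = sizeof(err);
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
        printf("connect: %s\n", err ? strerror(err) : "established");
        close(fd);
        return 0;
}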
index 51ae1f10d81aa9390e76e392096e3f93c15b65fe..d7ca265704821a1862f84f209550c4b19fc0db59 100644 (file)
@@ -187,11 +187,19 @@ struct smc_connection {
        struct work_struct      close_work;     /* peer sent some closing */
 };
 
+struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
+};
+
 struct smc_sock {                              /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
+       struct smc_connect_info *connect_info;  /* connect address & flags */
+       struct work_struct      connect_work;   /* handle non-blocking connect */
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
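
smc_connect() above sizes this blob as alen + 2 * sizeof(int), which matches
the offset of the addr member only because the two leading ints pack without a
hole. A userspace sketch of the equivalent, self-documenting offsetof() idiom
for such header-plus-variable-tail allocations; connect_info_alloc() is an
illustrative helper, not kernel API:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

struct connect_info {                   /* stand-in for smc_connect_info */
        int flags;
        int alen;
        struct sockaddr addr;           /* actual length is alen bytes */
};

static struct connect_info *connect_info_alloc(const struct sockaddr *sa,
                                               int alen, int flags)
{
        /* offsetof() stays correct even if padding precedes addr */
        struct connect_info *ci =
                calloc(1, offsetof(struct connect_info, addr) + alen);

        if (!ci)
                return NULL;
        ci->flags = flags;
        ci->alen = alen;
        memcpy(&ci->addr, sa, alen);    /* may extend past sizeof(addr) */
        return ci;
}

int main(void)
{
        struct sockaddr sa = { .sa_family = AF_UNSPEC };
        struct connect_info *ci = connect_info_alloc(&sa, sizeof(sa), 0);

        free(ci);
        return 0;
}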
index a7e8d63fc8aebe61c094c232d4b6f31439eed3e2..9bde1e4ca288cbf9db802fa3fd15650df4788867 100644 (file)
@@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                        /* force immediate tx of current consumer cursor, but
                         * under send_lock to guarantee arrival in seqno-order
                         */
-                       smc_tx_sndbuf_nonempty(conn);
+                       if (smc->sk.sk_state != SMC_INIT)
+                               smc_tx_sndbuf_nonempty(conn);
                }
        }
 
index 717449b1da0b73d924488d43cd04ed0871607d1b..ae5d168653cecf804b20e49f27bb39bcf0385081 100644 (file)
@@ -250,6 +250,7 @@ out:
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type)
 {
+       long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
        struct sock *clc_sk = smc->clcsock->sk;
        struct smc_clc_msg_hdr *clcm = buf;
        struct msghdr msg = {NULL, 0};
@@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
        krflags = MSG_WAITALL;
-       smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
        if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
                smc->sk.sk_err = EPROTO;
@@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
 
 out:
+       smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
        return reason_code;
 }
 
index fa41d988174146f6888d29db743b074d7b1ee1db..ac961dfb1ea1b775b666be3fdc0f292545703533 100644 (file)
@@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
        }
        switch (sk->sk_state) {
        case SMC_INIT:
+               sk->sk_state = SMC_PEERABORTWAIT;
+               break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
                release_sock(sk);
index cee66640075242fc7fe863734ebf301d261e02d6..f82886b7d1d8394adada4998159a708c3c897a82 100644 (file)
@@ -495,7 +495,8 @@ out:
 
 void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 {
-       union smc_host_cursor cfed, cons;
+       union smc_host_cursor cfed, cons, prod;
+       int sender_free = conn->rmb_desc->len;
        int to_confirm;
 
        smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
                       smc_curs_read(&conn->rx_curs_confirmed, conn),
                       conn);
        to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+       if (to_confirm > conn->rmbe_update_limit) {
+               smc_curs_write(&prod,
+                              smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+                              conn);
+               sender_free = conn->rmb_desc->len -
+                             smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+       }
 
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            force ||
            ((to_confirm > conn->rmbe_update_limit) &&
-            ((to_confirm > (conn->rmb_desc->len / 2)) ||
+            ((sender_free <= (conn->rmb_desc->len / 2)) ||
              conn->local_rx_ctrl.prod_flags.write_blocked))) {
                if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
                    conn->alert_token_local) { /* connection healthy */
index 8a109012608a6132a65293c86cd175426b851cbe..792f0313ea9171971ffdea3bcae7893b18fad9ae 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -117,10 +118,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events);
-static __poll_t sock_poll_mask(struct file *file, __poll_t);
-static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
+static __poll_t sock_poll(struct file *file,
+                             struct poll_table_struct *wait);
 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long compat_sock_ioctl(struct file *file,
@@ -143,8 +142,6 @@ static const struct file_operations socket_file_ops = {
        .llseek =       no_llseek,
        .read_iter =    sock_read_iter,
        .write_iter =   sock_write_iter,
-       .get_poll_head = sock_get_poll_head,
-       .poll_mask =    sock_poll_mask,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -391,39 +388,20 @@ static struct file_system_type sock_fs_type = {
 
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 {
-       struct qstr name = { .name = "" };
-       struct path path;
        struct file *file;
 
-       if (dname) {
-               name.name = dname;
-               name.len = strlen(name.name);
-       } else if (sock->sk) {
-               name.name = sock->sk->sk_prot_creator->name;
-               name.len = strlen(name.name);
-       }
-       path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
-       if (unlikely(!path.dentry)) {
-               sock_release(sock);
-               return ERR_PTR(-ENOMEM);
-       }
-       path.mnt = mntget(sock_mnt);
+       if (!dname)
+               dname = sock->sk ? sock->sk->sk_prot_creator->name : "";
 
-       d_instantiate(path.dentry, SOCK_INODE(sock));
-
-       file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
-                 &socket_file_ops);
+       file = alloc_file_pseudo(SOCK_INODE(sock), sock_mnt, dname,
+                               O_RDWR | (flags & O_NONBLOCK),
+                               &socket_file_ops);
        if (IS_ERR(file)) {
-               /* drop dentry, keep inode for a bit */
-               ihold(d_inode(path.dentry));
-               path_put(&path);
-               /* ... and now kill it properly */
                sock_release(sock);
                return file;
        }
 
        sock->file = file;
-       file->f_flags = O_RDWR | (flags & O_NONBLOCK);
        file->private_data = sock;
        return file;
 }
@@ -1130,48 +1108,16 @@ out_release:
 }
 EXPORT_SYMBOL(sock_create_lite);
 
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       if (!sock->ops->poll_mask)
-               return NULL;
-       sock_poll_busy_loop(sock, events);
-       return sk_sleep(sock->sk);
-}
-
-static __poll_t sock_poll_mask(struct file *file, __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       /*
-        * We need to be sure we are in sync with the socket flags modification.
-        *
-        * This memory barrier is paired in the wq_has_sleeper.
-        */
-       smp_mb();
-
-       /* this socket can poll_ll so tell the system call */
-       return sock->ops->poll_mask(sock, events) |
-               (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
-}
-
 /* No kernel lock held - perfect */
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
        struct socket *sock = file->private_data;
-       __poll_t events = poll_requested_events(wait), mask = 0;
-
-       if (sock->ops->poll) {
-               sock_poll_busy_loop(sock, events);
-               mask = sock->ops->poll(file, sock, wait);
-       } else if (sock->ops->poll_mask) {
-               sock_poll_wait(file, sock_get_poll_head(file, events), wait);
-               mask = sock->ops->poll_mask(sock, events);
-       }
+       __poll_t events = poll_requested_events(wait);
 
-       return mask | sock_poll_busy_flag(sock);
+       sock_poll_busy_loop(sock, events);
+       if (!sock->ops->poll)
+               return 0;
+       return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
@@ -2558,6 +2504,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 
        if (call < 1 || call > SYS_SENDMMSG)
                return -EINVAL;
+       call = array_index_nospec(call, SYS_SENDMMSG + 1);
 
        len = nargs[call];
        if (len > sizeof(a))
@@ -2724,7 +2671,8 @@ EXPORT_SYMBOL(sock_unregister);
 
 bool sock_is_registered(int family)
 {
-       return family < NPROTO && rcu_access_pointer(net_families[family]);
+       return family < NPROTO &&
+               rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
 }
 
 static int __init sock_init(void)
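
Both socketcall() and sock_is_registered() now clamp an attacker-influenced
index before the table load, so the CPU cannot speculatively read out of
bounds. A userspace sketch of the branchless mask behind array_index_nospec(),
modeled on the generic fallback in include/linux/nospec.h; the protocol-name
table is a made-up example:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))

/* all-ones when index < size, zero otherwise; relies on arithmetic
 * right shift of a signed long, as the kernel helper does
 */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
        return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
        const char *families[4] = { "UNSPEC", "UNIX", "INET", "AX25" };
        unsigned long idx = 7;                  /* out of range, maybe speculated */

        idx &= index_mask_nospec(idx, 4);       /* clamps to 0, branch-free */
        printf("%s\n", families[idx]);
        return 0;
}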
index 1a96951835999091c81ba451700f0a74565d9c59..625acb27efcc272ccdc0f60d4d693d6761ed139b 100644 (file)
@@ -35,7 +35,6 @@ struct _strp_msg {
         */
        struct strp_msg strp;
        int accum_len;
-       int early_eaten;
 };
 
 static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
        head = strp->skb_head;
        if (head) {
                /* Message already in progress */
-
-               stm = _strp_msg(head);
-               if (unlikely(stm->early_eaten)) {
-                       /* Already some number of bytes on the receive sock
-                        * data saved in skb_head, just indicate they
-                        * are consumed.
-                        */
-                       eaten = orig_len <= stm->early_eaten ?
-                               orig_len : stm->early_eaten;
-                       stm->early_eaten -= eaten;
-
-                       return eaten;
-               }
-
                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                                }
 
                                stm->accum_len += cand_len;
+                               eaten += cand_len;
                                strp->need_bytes = stm->strp.full_len -
                                                       stm->accum_len;
-                               stm->early_eaten = cand_len;
                                STRP_STATS_ADD(strp->stats.bytes, cand_len);
                                desc->count = 0; /* Stop reading socket */
                                break;
@@ -392,7 +377,7 @@ static int strp_read_sock(struct strparser *strp)
 /* Lower sock lock held */
 void strp_data_ready(struct strparser *strp)
 {
-       if (unlikely(strp->stopped))
+       if (unlikely(strp->stopped) || strp->paused)
                return;
 
        /* This check is needed to synchronize with do_strp_work.
@@ -407,9 +392,6 @@ void strp_data_ready(struct strparser *strp)
                return;
        }
 
-       if (strp->paused)
-               return;
-
        if (strp->need_bytes) {
                if (strp_peek_len(strp) < strp->need_bytes)
                        return;
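
Dropping early_eaten works because __strp_recv() now folds the capped chunk
into eaten immediately instead of replaying that bookkeeping on the next
receive. For readers new to strparser, a toy userspace sketch of the same
accumulate-until-full_len accounting; the 4-byte big-endian length header is
an assumed wire format, and partial headers are left unhandled for brevity:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frame_state {
        uint32_t full_len;      /* header + payload; 0 = no record open */
        uint32_t accum_len;     /* bytes of the open record seen so far */
};

/* Consume up to len bytes of stream data; returns bytes eaten. */
static size_t frame_recv(struct frame_state *st, const uint8_t *buf, size_t len)
{
        size_t eaten = 0;

        if (!st->full_len && len >= 4) {        /* parse a fresh header */
                uint32_t be;

                memcpy(&be, buf, 4);
                st->full_len = ntohl(be) + 4;
                st->accum_len = 0;
        }
        if (st->full_len) {
                uint32_t need = st->full_len - st->accum_len;

                eaten = len < need ? len : need;
                st->accum_len += eaten;
                if (st->accum_len == st->full_len) {
                        printf("complete record, %u bytes\n", st->full_len);
                        st->full_len = 0;       /* ready for the next one */
                }
        }
        return eaten;
}

int main(void)
{
        /* one 5-byte record split across two chunks */
        static const uint8_t chunk1[] = { 0, 0, 0, 5, 'h', 'e' };
        static const uint8_t chunk2[] = { 'l', 'l', 'o' };
        struct frame_state st = { 0 };

        frame_recv(&st, chunk1, sizeof(chunk1));
        frame_recv(&st, chunk2, sizeof(chunk2));        /* completes it */
        return 0;
}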
index 3c85af058227d14bda8d9f598ec45e7b8db1785e..3fabf9f6a0f9d92eaccbc33a9600ca2d1370aa18 100644 (file)
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
                task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
-               req->rq_xid = xprt_alloc_xid(xprt);
        ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
-       return (__force __be32)xprt->xid++;
+       __be32 xid;
+
+       spin_lock(&xprt->reserve_lock);
+       xid = (__force __be32)xprt->xid++;
+       spin_unlock(&xprt->reserve_lock);
+       return xid;
 }
 
 static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
+       req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        req->rq_bytes_sent = 0;
        req->rq_snd_buf.len = 0;
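
Moving XID assignment under xprt->reserve_lock closes a race where two tasks
could read xprt->xid before either increment landed and transmit duplicate
XIDs. A userspace sketch of the same fix expressed with a C11 atomic instead
of a lock (equivalent for correctness; the kernel patch takes the existing
reserve_lock instead):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t next_xid;

static uint32_t xid_alloc(void)
{
        /* atomic read-modify-write: no two callers get the same value */
        return atomic_fetch_add_explicit(&next_xid, 1, memory_order_relaxed);
}

int main(void)
{
        printf("xid %u\n", xid_alloc());
        printf("xid %u\n", xid_alloc());        /* always differs */
        return 0;
}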
index 9f666e0650e23c0d4275ae219c23c5e301df5ac4..2830709957bddeb13adf0f352abb9aaacba3ec55 100644 (file)
@@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
 }
 
 /* tipc_disc_addr_trial_msg(): - handle an address uniqueness trial from peer
+ * Returns true if the message should be dropped by the caller, i.e., if it
+ * is a trial message or we are inside the trial period. Otherwise false.
  */
 static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                                     struct tipc_media_addr *maddr,
@@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
 
+       /* Accept regular link requests/responses only after trial period */
        if (mtyp != DSC_TRIAL_MSG)
-               return false;
+               return trial;
 
        sugg_addr = tipc_node_try_addr(net, peer_id, src);
        if (sugg_addr)
@@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t)
 {
        struct tipc_discoverer *d = from_timer(d, t, timer);
        struct tipc_net *tn = tipc_net(d->net);
-       u32 self = tipc_own_addr(d->net);
        struct tipc_media_addr maddr;
        struct sk_buff *skb = NULL;
        struct net *net = d->net;
@@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t)
                goto exit;
        }
 
-       /* Did we just leave the address trial period ? */
-       if (!self && !time_before(jiffies, tn->addr_trial_end)) {
-               self = tn->trial_addr;
-               tipc_net_finalize(net, self);
-               msg_set_prevnode(buf_msg(d->skb), self);
+       /* Trial period over? */
+       if (!time_before(jiffies, tn->addr_trial_end)) {
+               /* Did we just leave it? */
+               if (!tipc_own_addr(net))
+                       tipc_net_finalize(net, tn->trial_addr);
+
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+               msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
        }
 
        /* Adjust timeout interval according to discovery phase */
index 4fbaa0464405370601cb2fd1dd3b03733836d342..62199cf5a56c04db99af54dad9fc2564df8d6b05 100644 (file)
@@ -121,12 +121,15 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
 
 void tipc_net_finalize(struct net *net, u32 addr)
 {
-       tipc_set_node_addr(net, addr);
-       smp_mb();
-       tipc_named_reinit(net);
-       tipc_sk_reinit(net);
-       tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
-                            TIPC_CLUSTER_SCOPE, 0, addr);
+       struct tipc_net *tn = tipc_net(net);
+
+       if (!cmpxchg(&tn->node_addr, 0, addr)) {
+               tipc_set_node_addr(net, addr);
+               tipc_named_reinit(net);
+               tipc_sk_reinit(net);
+               tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+                                    TIPC_CLUSTER_SCOPE, 0, addr);
+       }
 }
 
 void tipc_net_stop(struct net *net)
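
The cmpxchg() makes tipc_net_finalize() a one-shot: whichever context swings
node_addr from 0 first performs the re-initialization, and every later caller
sees a nonzero value and backs off. A userspace sketch of the pattern with a
C11 compare-and-exchange:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t node_addr;

static void net_finalize(uint32_t addr)
{
        uint32_t expected = 0;

        /* succeeds for exactly one caller, like cmpxchg(&tn->node_addr, 0, addr) */
        if (atomic_compare_exchange_strong(&node_addr, &expected, addr))
                printf("finalized with address %#x\n", addr);
}

int main(void)
{
        net_finalize(0x1001);   /* wins and runs the one-time setup */
        net_finalize(0x2002);   /* loses: address is already set */
        return 0;
}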
index 6a44eb812baf4a2fe31eeb55b04023f9f402666b..0453bd451ce80c1935bb6588facc0f2c23ae8644 100644 (file)
@@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
 }
 
 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
+ * Returns suggested address if any, otherwise 0
  */
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
 {
@@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
        if (n) {
                addr = n->addr;
                tipc_node_put(n);
+               return addr;
        }
-       /* Even this node may be in trial phase */
+
+       /* Even this node may be in conflict */
        if (tn->trial_addr == addr)
                return tipc_node_suggest_addr(net, addr);
 
-       return addr;
+       return 0;
 }
 
 void tipc_node_check_dest(struct net *net, u32 addr,
index 14a5d055717d2a7b95ea353b15f53dfb81a39515..930852c54d7a6e97207c61a7c942e487781457e7 100644 (file)
@@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 /**
- * tipc_poll - read pollmask
+ * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
+ * @wait: poll table in which to record the socket's wait queue
  *
  * Returns pollmask value
  *
@@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        __poll_t revents = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
@@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
@@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
index a127d61e8af984d3aaefde49c94f48a9a9187d53..301f224304698950544088c16518ea2e14ff41a6 100644 (file)
@@ -712,7 +712,7 @@ static int __init tls_register(void)
        build_protos(tls_prots[TLSV4], &tcp_prot);
 
        tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
+       tls_sw_proto_ops.poll = tls_sw_poll;
        tls_sw_proto_ops.splice_read = tls_sw_splice_read;
 
 #ifdef CONFIG_TLS_DEVICE
index f127fac88acfe0046b0a7dd55bab4d6d486de105..1f3d9789af30fb88cf9e7550b40dcda1e897e262 100644 (file)
@@ -440,7 +440,7 @@ alloc_encrypted:
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (!ret)
                                continue;
-                       if (ret == -EAGAIN)
+                       if (ret < 0)
                                goto send_end;
 
                        copied -= try_to_copy;
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
                        return NULL;
                }
 
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       return NULL;
+
                if (sock_flag(sk, SOCK_DONE))
                        return NULL;
 
@@ -701,6 +704,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
        nsg = skb_to_sgvec(skb, &sgin[1],
                           rxm->offset + tls_ctx->rx.prepend_size,
                           rxm->full_len - tls_ctx->rx.prepend_size);
+       if (nsg < 0) {
+               ret = nsg;
+               goto out;
+       }
 
        tls_make_aad(ctx->rx_aad_ciphertext,
                     rxm->full_len - tls_ctx->rx.overhead_size,
@@ -712,6 +719,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                                rxm->full_len - tls_ctx->rx.overhead_size,
                                skb, sk->sk_allocation);
 
+out:
        if (sgin != &sgin_arr[0])
                kfree(sgin);
 
@@ -919,22 +927,23 @@ splice_read_end:
        return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait)
 {
+       unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       __poll_t mask;
 
-       /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-       mask = ctx->sk_poll_mask(sock, events);
+       /* Grab POLLOUT and POLLHUP from the underlying socket */
+       ret = ctx->sk_poll(file, sock, wait);
 
-       /* Clear EPOLLIN bits, and set based on recv_pkt */
-       mask &= ~(EPOLLIN | EPOLLRDNORM);
+       /* Clear POLLIN bits, and set based on recv_pkt */
+       ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
-               mask |= EPOLLIN | EPOLLRDNORM;
+               ret |= POLLIN | POLLRDNORM;
 
-       return mask;
+       return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1200,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                sk->sk_data_ready = tls_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);
 
-               sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+               sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
                strp_check_rcv(&sw_ctx_rx->strp);
        }
index 95b02a71fd47161735c51988463e5f5e4a7d44b3..e5473c03d667ad51308c3e8b705f3b1187f619e8 100644 (file)
@@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
 static int unix_socketpair(struct socket *, struct socket *);
 static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int);
-static __poll_t unix_poll_mask(struct socket *, __poll_t);
-static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t);
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
+                                   poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_poll_mask,
+       .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = {
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
@@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return err;
 }
 
-static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+                                   poll_table *wait)
 {
        struct sock *sk = sock->sk, *other;
-       int writable;
-       __poll_t mask = 0;
+       unsigned int writable;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
        writable = unix_writable(sk);
index bb5d5fa68c357af4962602b2bced2164c6e5ab44..ab27a2872935774d41fb1f2c2f9341eb67c8cc0a 100644 (file)
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
        return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
        struct sock *sk;
        struct sock *listener;
        struct vsock_sock *vsk;
        bool cleanup;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, pending_work.work);
        sk = sk_vsock(vsk);
        listener = vsk->listener;
        cleanup = true;
@@ -498,7 +498,6 @@ out:
        sock_put(sk);
        sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
 
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
        return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
        vsk->sent_request = false;
        vsk->ignore_connecting_rst = false;
        vsk->peer_shutdown = 0;
+       INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+       INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
        psk = parent ? vsock_sk(parent) : NULL;
        if (parent) {
@@ -850,11 +853,18 @@ static int vsock_shutdown(struct socket *sock, int mode)
        return err;
 }
 
-static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
-       struct sock *sk = sock->sk;
-       struct vsock_sock *vsk = vsock_sk(sk);
-       __poll_t mask = 0;
+       struct sock *sk;
+       __poll_t mask;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
@@ -1084,7 +1094,7 @@ static const struct proto_ops vsock_dgram_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
@@ -1110,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
        struct vsock_sock *vsk;
        int cancel = 0;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);
 
        lock_sock(sk);
@@ -1214,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                         * timeout fires.
                         */
                        sock_hold(sk);
-                       INIT_DELAYED_WORK(&vsk->dwork,
-                                         vsock_connect_timeout);
-                       schedule_delayed_work(&vsk->dwork, timeout);
+                       schedule_delayed_work(&vsk->connect_work, timeout);
 
                        /* Skip ahead to preserve error code set above. */
                        goto out_wait;
@@ -1842,7 +1850,7 @@ static const struct proto_ops vsock_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = vsock_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = vsock_listen,
        .shutdown = vsock_shutdown,
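
The bug fixed here came from both delayed works sharing a single dwork member,
so container_of() in one handler could resolve against a work scheduled by the
other path. With dedicated connect_work and pending_work members, each handler
recovers the enclosing socket at its own offset. A userspace sketch of the
container_of() arithmetic; vsock_like_sock is an illustrative stand-in:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct vsock_like_sock {
        int state;
        struct work connect_work;
        struct work pending_work;
};

static void connect_handler(struct work *w)
{
        /* subtracts offsetof(connect_work) to recover the socket */
        struct vsock_like_sock *vsk =
                container_of(w, struct vsock_like_sock, connect_work);

        printf("state=%d\n", vsk->state);
}

int main(void)
{
        struct vsock_like_sock s = { .state = 42 };

        connect_handler(&s.connect_work);       /* prints state=42 */
        return 0;
}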
index 8e03bd3f3668b573c4d61a786e90a238abe9fe66..5d3cce9e8744d5207753107aeb55518f2848f50a 100644 (file)
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
                return -ENODEV;
        }
 
-       if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+       if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
                return virtio_transport_send_pkt_loopback(vsock, pkt);
 
        if (pkt->reply)
index a7a73ffe675b2a9e829a375e90fc3d055386dcf8..cb332adb84cdcadc006de6d7a8668111babd2f53 100644 (file)
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
        vpending->listener = sk;
        sock_hold(sk);
        sock_hold(pending);
-       INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-       schedule_delayed_work(&vpending->dwork, HZ);
+       schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
        return err;
index c7bbe5f0aae8839bdfe5ac7b7bd02c6aad8ac8dc..80bc986c79e5aea8d50121be481833738a1d50b7 100644 (file)
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
                params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
                                         BIT(NL80211_STA_FLAG_MFP) |
                                         BIT(NL80211_STA_FLAG_AUTHORIZED);
+               break;
        default:
                return -EINVAL;
        }
@@ -6231,7 +6232,7 @@ do {                                                                          \
                                  nl80211_check_s32);
        /*
         * Check HT operation mode based on
-        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        * IEEE 802.11-2016 9.4.2.57 HT Operation element.
         */
        if (tb[NL80211_MESHCONF_HT_OPMODE]) {
                ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6242,9 @@ do {                                                                         \
                                  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
                        return -EINVAL;
 
-               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                       return -EINVAL;
+               /* NON_HT_STA bit is reserved, but some programs set it */
+               ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-                               return -EINVAL;
-                       break;
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                               return -EINVAL;
-                       break;
-               }
                cfg->ht_opmode = ht_opmode;
                mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
@@ -10962,9 +10950,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                    rem) {
                        u8 *mask_pat;
 
-                       nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        nl80211_packet_pattern_policy,
-                                        info->extack);
+                       err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                              nl80211_packet_pattern_policy,
+                                              info->extack);
+                       if (err)
+                               goto error;
+
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11204,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                nl80211_packet_pattern_policy, NULL);
+               err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                      nl80211_packet_pattern_policy, NULL);
+               if (err)
+                       return err;
+
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
@@ -14930,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
 static int __nl80211_rx_control_port(struct net_device *dev,
-                                    const u8 *buf, size_t len,
-                                    const u8 *addr, u16 proto,
+                                    struct sk_buff *skb,
                                     bool unencrypted, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+       struct ethhdr *ehdr = eth_hdr(skb);
+       const u8 *addr = ehdr->h_source;
+       u16 proto = be16_to_cpu(skb->protocol);
        struct sk_buff *msg;
        void *hdr;
+       struct nlattr *frame;
+
        u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
 
        if (!nlportid)
                return -ENOENT;
 
-       msg = nlmsg_new(100 + len, gfp);
+       msg = nlmsg_new(100 + skb->len, gfp);
        if (!msg)
                return -ENOMEM;
 
@@ -14957,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
            nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
                              NL80211_ATTR_PAD) ||
-           nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
            nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
            (unencrypted && nla_put_flag(msg,
                                         NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
                goto nla_put_failure;
 
+       frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
+       if (!frame)
+               goto nla_put_failure;
+
+       skb_copy_bits(skb, 0, nla_data(frame), skb->len);
        genlmsg_end(msg, hdr);
 
        return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14974,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
 }
 
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted)
+                             struct sk_buff *skb, bool unencrypted)
 {
        int ret;
 
-       trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
-       ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
-                                       unencrypted, GFP_ATOMIC);
+       trace_cfg80211_rx_control_port(dev, skb, unencrypted);
+       ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
        trace_cfg80211_return_bool(ret == 0);
        return ret == 0;
 }
index bbe6298e4bb9e09bd59a2c602af9f6c360829a3d..4fc66a117b7d74f86a1589a7a02b88f02f203b7d 100644 (file)
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
                 * as some drivers used this to restore its orig_* reg domain.
                 */
                if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+                   !(wiphy->regulatory_flags &
+                     REGULATORY_WIPHY_SELF_MANAGED))
                        reg_call_notifier(wiphy, lr);
                return;
        }
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
        }
 }
 
-static bool reg_only_self_managed_wiphys(void)
-{
-       struct cfg80211_registered_device *rdev;
-       struct wiphy *wiphy;
-       bool self_managed_found = false;
-
-       ASSERT_RTNL();
-
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-               wiphy = &rdev->wiphy;
-               if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
-                       self_managed_found = true;
-               else
-                       return false;
-       }
-
-       /* make sure at least one self-managed wiphy exists */
-       return self_managed_found;
-}
-
 /*
  * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
  * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
        spin_unlock(&reg_requests_lock);
 
        notify_self_managed_wiphys(reg_request);
-       if (reg_only_self_managed_wiphys()) {
-               reg_free_request(reg_request);
-               return;
-       }
 
        reg_process_hint(reg_request);
 
index 2b417a2fe63ffb564b744e8e159c3bdbcf43a4ed..7c73510b161f3b84ce3d7d24dd3b9aa3b472823e 100644 (file)
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
 );
 
 TRACE_EVENT(cfg80211_rx_control_port,
-       TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
-                const u8 *addr, u16 proto, bool unencrypted),
-       TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+       TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
+                bool unencrypted),
+       TP_ARGS(netdev, skb, unencrypted),
        TP_STRUCT__entry(
                NETDEV_ENTRY
-               MAC_ENTRY(addr)
+               __field(int, len)
+               MAC_ENTRY(from)
                __field(u16, proto)
                __field(bool, unencrypted)
        ),
        TP_fast_assign(
                NETDEV_ASSIGN;
-               MAC_ASSIGN(addr, addr);
-               __entry->proto = proto;
+               __entry->len = skb->len;
+               MAC_ASSIGN(from, eth_hdr(skb)->h_source);
+               __entry->proto = be16_to_cpu(skb->protocol);
                __entry->unencrypted = unencrypted;
        ),
-       TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
-                 NETDEV_PR_ARG, MAC_PR_ARG(addr),
+       TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
+                 NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
                  __entry->proto, BOOL_TO_STR(__entry->unencrypted))
 );
 
index f93365ae0fdd76b6aab9b6227cfcbb96f41eed82..d49aa79b79970d403b5c165d4000b2aa1d493442 100644 (file)
@@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       x25_accept,
        .getname =      x25_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        x25_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_x25_ioctl,
index 36919a254ba370c37b4e199bfd68c285e25fdeb6..4e937cd7c17dc6b4b617f463336e9a8d2867ed6d 100644 (file)
@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
        int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-       if (err) {
-               xdp_return_buff(xdp);
+       if (err)
                xs->rx_dropped++;
-       }
 
        return err;
 }
@@ -118,6 +116,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
        u64 addr;
        int err;
 
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+               return -EINVAL;
+
        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
@@ -196,8 +197,11 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 {
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
+       unsigned long flags;
 
+       spin_lock_irqsave(&xs->tx_completion_lock, flags);
        WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
+       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
 
        sock_wfree(skb);
 }
@@ -212,9 +216,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
        struct sk_buff *skb;
        int err = 0;
 
-       if (unlikely(!xs->tx))
-               return -ENOBUFS;
-
        mutex_lock(&xs->mutex);
 
        while (xskq_peek_desc(xs->tx, &desc)) {
@@ -227,22 +228,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                        goto out;
                }
 
-               if (xskq_reserve_addr(xs->umem->cq)) {
-                       err = -EAGAIN;
-                       goto out;
-               }
-
-               len = desc.len;
-               if (unlikely(len > xs->dev->mtu)) {
-                       err = -EMSGSIZE;
+               if (xskq_reserve_addr(xs->umem->cq))
                        goto out;
-               }
 
-               if (xs->queue_id >= xs->dev->real_num_tx_queues) {
-                       err = -ENXIO;
+               if (xs->queue_id >= xs->dev->real_num_tx_queues)
                        goto out;
-               }
 
+               len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
@@ -265,15 +257,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                skb->destructor = xsk_destruct_skb;
 
                err = dev_direct_xmit(skb, xs->queue_id);
+               xskq_discard_desc(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
-                       err = -EAGAIN;
-                       /* SKB consumed by dev_direct_xmit() */
+                       /* SKB completed but not sent */
+                       err = -EBUSY;
                        goto out;
                }
 
                sent_frame = true;
-               xskq_discard_desc(xs->tx);
        }
 
 out:
@@ -294,15 +286,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
+       if (unlikely(!xs->tx))
+               return -ENOBUFS;
        if (need_wait)
                return -EOPNOTSUPP;
 
        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
 }
 
-static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
+static unsigned int xsk_poll(struct file *file, struct socket *sock,
+                            struct poll_table_struct *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
 
@@ -693,7 +688,7 @@ static const struct proto_ops xsk_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = xsk_poll_mask,
+       .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -751,6 +746,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 
        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);
+       spin_lock_init(&xs->tx_completion_lock);
 
        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
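
xsk_destruct_skb() can fire from interrupt context on any CPU once the driver
frees the skb, so two completions could otherwise race on the producer index
of the completion ring; hence the new tx_completion_lock taken with irqsave.
A userspace sketch of the invariant being protected, with a pthread mutex
standing in for the spinlock:

#include <pthread.h>
#include <stdint.h>

struct comp_ring {
        pthread_mutex_t lock;   /* stand-in for xs->tx_completion_lock */
        uint64_t desc[64];
        uint32_t prod;          /* free-running producer index */
};

static void comp_ring_produce(struct comp_ring *r, uint64_t addr)
{
        /* the increment and the store must be one atomic unit */
        pthread_mutex_lock(&r->lock);
        r->desc[r->prod++ & 63] = addr;
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct comp_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

        comp_ring_produce(&r, 0xdeadbeef);      /* safe from any thread */
        return 0;
}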
index ef6a6f0ec949049de2fc03d1a675ee0c1f48ba5e..8a64b150be546d2d5903bbc62e1b32525e73d992 100644 (file)
@@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
        return (entries > dcnt) ? dcnt : entries;
 }
 
-static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
-{
-       return q->nentries - (producer - q->cons_tail);
-}
-
 static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
 {
-       u32 free_entries = xskq_nb_free_lazy(q, producer);
+       u32 free_entries = q->nentries - (producer - q->cons_tail);
 
        if (free_entries >= dcnt)
                return free_entries;
@@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
 {
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-       if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
+       if (xskq_nb_free(q, q->prod_tail, 1) == 0)
                return -ENOSPC;
 
        ring->desc[q->prod_tail++ & q->ring_mask] = addr;
@@ -255,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
 
 static inline bool xskq_empty_desc(struct xsk_queue *q)
 {
-       return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
+       return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
 }
 
 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
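The helper removed above was a single line of free-running index arithmetic, now inlined into xskq_nb_free(): nentries - (producer - cons_tail) stays correct even after the 32-bit counters wrap, because the subtraction is unsigned. A minimal standalone sketch of the idiom (illustrative, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Free-entry accounting for a ring driven by free-running 32-bit
     * producer/consumer indices.  Unsigned wrap-around keeps the
     * in-flight count (prod - cons) correct in every case. */
    static uint32_t ring_free(uint32_t nentries, uint32_t prod, uint32_t cons)
    {
            return nentries - (prod - cons);
    }

    int main(void)
    {
            /* producer wrapped past UINT32_MAX, consumer not yet:
             * 5 entries in flight, 3 of 8 slots free */
            uint32_t prod = 3, cons = 0xfffffffeu;

            printf("free = %u\n", ring_free(8, prod, cons)); /* 3 */
            return 0;
    }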
index 5f48251c1319aa5bb122fe4849030baa758769ee..7c5e8978aeaabfe417d86f943f1576f30571da4c 100644 (file)
@@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
        if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
                return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
+       if (IS_ERR(dst))
+               dst_release(dst_orig);
+
        return dst;
 }
 EXPORT_SYMBOL(xfrm_lookup_route);
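The three added lines close a reference leak: on failure the function now drops the reference it was handed on dst_orig instead of returning with it still held. A tiny consume-on-failure sketch of that convention (userspace analog with a hand-rolled refcount, not the kernel API):

    #include <stdio.h>

    struct obj { int refcnt; };

    static void obj_put(struct obj *o) { o->refcnt--; }

    /* On failure, consume the reference the caller passed in. */
    static struct obj *lookup(struct obj *orig, int fail)
    {
            if (fail) {
                    obj_put(orig);
                    return NULL;
            }
            return orig;    /* on success, ownership passes through */
    }

    int main(void)
    {
            struct obj o = { .refcnt = 1 };

            if (!lookup(&o, 1))
                    printf("refcnt after failed lookup: %d\n", o.refcnt); /* 0 */
            return 0;
    }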
index 080035f056d992c49f8cbcc776d579c9769c67eb..33878e6e0d0a01cdc5da8ab20bdb92d0d93e056d 100644 (file)
@@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
 {
        struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
 
-       if (nlsk)
-               return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
-       else
-               return -1;
+       if (!nlsk) {
+               kfree_skb(skb);
+               return -EPIPE;
+       }
+
+       return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
 }
 
 static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
 #ifdef CONFIG_XFRM_SUB_POLICY
 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
-       struct xfrm_userpolicy_type upt = {
-               .type = type,
-       };
+       struct xfrm_userpolicy_type upt;
+
+       /* Sadly there are two holes in struct xfrm_userpolicy_type */
+       memset(&upt, 0, sizeof(upt));
+       upt.type = type;
 
        return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 }
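The switch from a designated initializer to an explicit memset() matters because an initializer zeroes every member of struct xfrm_userpolicy_type but leaves its padding holes unspecified, so the old code could copy stale kernel stack bytes to userspace through netlink. A compilable illustration (the struct below is hypothetical, chosen to have holes on common ABIs):

    #include <stdio.h>
    #include <string.h>

    struct with_holes {
            unsigned char type;     /* typically 3 padding bytes follow */
            unsigned int  flags;
            unsigned char end;      /* typically 3 tail padding bytes */
    };

    int main(void)
    {
            struct with_holes a = { .type = 1 };    /* members zeroed, padding unspecified */
            struct with_holes b;

            memset(&b, 0, sizeof(b));               /* every byte zeroed, padding included */
            b.type = 1;

            /* usually prints 12 vs 6: half the copied bytes are padding */
            printf("sizeof = %zu, member bytes = %zu\n", sizeof(a),
                   sizeof(a.type) + sizeof(a.flags) + sizeof(a.end));
            return 0;
    }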
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644 (file)
index 0000000..8ae4940
--- /dev/null
@@ -0,0 +1,49 @@
+cpustat
+fds_example
+lathist
+load_sock_ops
+lwt_len_hist
+map_perf_test
+offwaketime
+per_socket_stats_example
+sampleip
+sock_example
+sockex1
+sockex2
+sockex3
+spintest
+syscall_nrs.h
+syscall_tp
+task_fd_query
+tc_l2_redirect
+test_cgrp2_array_pin
+test_cgrp2_attach
+test_cgrp2_attach2
+test_cgrp2_sock
+test_cgrp2_sock2
+test_current_task_under_cgroup
+test_lru_dist
+test_map_in_map
+test_overhead
+test_probe_write_user
+trace_event
+trace_output
+tracex1
+tracex2
+tracex3
+tracex4
+tracex5
+tracex6
+tracex7
+xdp1
+xdp2
+xdp_adjust_tail
+xdp_fwd
+xdp_monitor
+xdp_redirect
+xdp_redirect_cpu
+xdp_redirect_map
+xdp_router_ipv4
+xdp_rxq_info
+xdp_tx_iptunnel
+xdpsock
index 95c16324760c0be1af8be927e1adffae0b582525..0b6f22feb2c9ce37787ea5384276c85a4e1171eb 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define KBUILD_MODNAME "foo"
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
        return 0;
 }
 
-struct vlan_hdr {
-       uint16_t h_vlan_TCI;
-       uint16_t h_vlan_encapsulated_proto;
-};
-
 SEC("varlen")
 int handle_ingress(struct __sk_buff *skb)
 {
index 6caf47afa635ca680bb56b43ef78c7f62b293dd7..9d6dcaa9db9206ebe6a5bb14fe98c7001a543b6d 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define _GNU_SOURCE
 #include <sched.h>
+#include <errno.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <asm/unistd.h>
@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               write(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (write(fd, buf, sizeof(buf)) < 0) {
+                       printf("task rename failed: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("task_rename:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               read(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (read(fd, buf, sizeof(buf)) < 0) {
+                       printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("urandom_read:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
index 1fa1becfa641510ae67db4d0ea64c3971f6d2f4d..d08046ab81f043505e0ea42a2e8c85661ae68f76 100644 (file)
@@ -122,6 +122,16 @@ static void print_stacks(void)
        }
 }
 
+static inline int generate_load(void)
+{
+       if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
+               printf("failed to generate some load with dd: %s\n", strerror(errno));
+               return -1;
+       }
+
+       return 0;
+}
+
 static void test_perf_event_all_cpu(struct perf_event_attr *attr)
 {
        int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
        }
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto all_cpu_err;
+       }
        print_stacks();
 all_cpu_err:
        for (i--; i >= 0; i--) {
@@ -156,7 +170,7 @@ all_cpu_err:
 
 static void test_perf_event_task(struct perf_event_attr *attr)
 {
-       int pmu_fd;
+       int pmu_fd, error = 0;
 
        /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
         * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
        }
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto err;
+       }
        print_stacks();
+err:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
        close(pmu_fd);
+       if (error)
+               int_exit(0);
 }
 
 static void test_bpf_perf_event(void)
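One caveat on the generate_load() helper introduced above: system() returns -1 only when the shell child cannot be created, while a dd that runs but fails exits nonzero and reports that through the wait status. A stricter variant would look something like this (sketch, assuming a POSIX host):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/wait.h>

    static int run_checked(const char *cmd)
    {
            int status = system(cmd);

            if (status < 0) {       /* fork/exec of the shell itself failed */
                    fprintf(stderr, "system: %s\n", strerror(errno));
                    return -1;
            }
            if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
                    return 0;       /* command ran and succeeded */
            return -1;              /* nonzero exit status or killed by signal */
    }

    int main(void)
    {
            return run_checked("true") == 0 ? 0 : 1;
    }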
index b9c9549c4c272a944d818cef153e496f96dcec84..4bde9d066c4616430533cef02e32d295b0329b11 100755 (executable)
@@ -16,8 +16,8 @@
 BPF_FILE=xdp2skb_meta_kern.o
 DIR=$(dirname $0)
 
-export TC=/usr/sbin/tc
-export IP=/usr/sbin/ip
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
 
 function usage() {
     echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
     local allow_fail="$2"
     shift 2
     if [[ -n "$VERBOSE" ]]; then
-       echo "$(basename $cmd) $@"
+       echo "$cmd $@"
     fi
     if [[ -n "$DRYRUN" ]]; then
        return
index 6673cdb9f55cab3fb32faaca755f805e8c10ed8f..a7e94e7ff87df5f60f7a57522de77b5929e46029 100644 (file)
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
        struct ethhdr *eth = data;
        struct ipv6hdr *ip6h;
        struct iphdr *iph;
-       int out_index;
        u16 h_proto;
        u64 nh_off;
+       int rc;
 
        nh_off = sizeof(*eth);
        if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
        fib_params.ifindex = ctx->ingress_ifindex;
 
-       out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+       rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
 
        /* verify egress index has xdp support
         * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
         * NOTE: without verification that egress index supports XDP
         *       forwarding packets are dropped.
         */
-       if (out_index > 0) {
+       if (rc == 0) {
                if (h_proto == htons(ETH_P_IP))
                        ip_decrease_ttl(iph);
                else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
                memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
                memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-               return bpf_redirect_map(&tx_port, out_index, 0);
+               return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
        }
 
        return XDP_PASS;
index 303e9e7161f3169ebcad88974a0d549f01e7db30..4938dcbaecbfbb7414a824dc5b9f033ff72643e1 100644 (file)
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
index f6efaefd485b15929b04bf23346ef21934342d52..4b4d78fffe30427d26ee28a338ff6749dccb7448 100644 (file)
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
         * procedure.
         */
        create_cpu_entry(1,  1024, 0, false);
-       create_cpu_entry(1,   128, 0, false);
+       create_cpu_entry(1,     8, 0, false);
        create_cpu_entry(1, 16000, 0, false);
 }
 
index d69c8d78d3fdef775f27d97b94401fabb5ccfd72..5904b15438313399d8bfa8fe7412ed30b5342556 100644 (file)
@@ -729,7 +729,7 @@ static void kick_tx(int fd)
        int ret;
 
        ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
-       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
+       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
                return;
        lassert(0);
 }
index 2960e26c6ea4c756064db41cc7d39c14f058068b..2535c3677c7b66a1650fc3a1a7fb9c99684ae8ea 100644 (file)
@@ -178,6 +178,8 @@ static const char *vbe_name(u32 index)
        return "(invalid)";
 }
 
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+                                     pgoff_t pgoff);
 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
                                    pgoff_t pgoff);
 
@@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
                   MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
                pos -= MBOCHS_MMIO_BAR_OFFSET;
                poff = pos & ~PAGE_MASK;
-               pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
+               pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
                map = kmap(pg);
                if (is_write)
                        memcpy(map + poff, buf, count);
@@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state)
        dev_dbg(dev, "%s: %d pages released\n", __func__, count);
 }
 
-static int mbochs_region_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mdev_state *mdev_state = vma->vm_private_data;
@@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
        return 0;
 }
 
-static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
@@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
        mutex_unlock(&mdev_state->ops_lock);
 }
 
-static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
-                                      unsigned long page_num)
+static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
 {
        struct mbochs_dmabuf *dmabuf = buf->priv;
        struct page *page = dmabuf->pages[page_num];
 
-       return kmap_atomic(page);
+       return kmap(page);
 }
 
-static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
+static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
+                                void *vaddr)
 {
-       struct mbochs_dmabuf *dmabuf = buf->priv;
-       struct page *page = dmabuf->pages[page_num];
-
-       return kmap(page);
+       kunmap(vaddr);
 }
 
 static struct dma_buf_ops mbochs_dmabuf_ops = {
        .map_dma_buf      = mbochs_map_dmabuf,
        .unmap_dma_buf    = mbochs_unmap_dmabuf,
        .release          = mbochs_release_dmabuf,
-       .map_atomic       = mbochs_kmap_atomic_dmabuf,
        .map              = mbochs_kmap_dmabuf,
+       .unmap            = mbochs_kunmap_dmabuf,
        .mmap             = mbochs_mmap_dmabuf,
 };
 
index c8156d61678cfbc6907a9176efbccb03aa8387ce..86321f06461e9835103950242930187c62837e1d 100644 (file)
@@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
 # Prefix -I with $(srctree) if it is not an absolute path.
 # skip if -I has no parameter
 addtree = $(if $(patsubst -I%,%,$(1)), \
-$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)))
+$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1))
 
 # Find all -I options and call addtree
 flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
index 34d9e9ce97c29c5e0ca78e6b06085a6b29ffd8bd..514ed63ff5710789fda060eb06cf10813a4124ad 100644 (file)
@@ -239,6 +239,7 @@ cmd_record_mcount =                                         \
             "$(CC_FLAGS_FTRACE)" ]; then                       \
                $(sub_cmd_record_mcount)                        \
        fi;
+endif # -record-mcount
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),)
   objtool_args += --retpoline
 endif
 endif
-endif
 
 
 ifdef CONFIG_MODVERSIONS
@@ -590,7 +590,4 @@ endif
 # We never want them to be removed automatically.
 .SECONDARY: $(targets)
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 808d09f27ad4063424211a8264297a0b3945d3c5..17ef94c635cd5dcfd23c355576a351730076dd73 100644 (file)
@@ -88,7 +88,4 @@ PHONY += $(subdir-ymn)
 $(subdir-ymn):
        $(Q)$(MAKE) $(clean)=$@
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index a763b4775d062965a82af7761aaa8cb5a28aeb8e..40867a41615ba812987100133793183e6f82a1d5 100644 (file)
@@ -54,8 +54,4 @@ PHONY += $(subdir-ym)
 $(subdir-ym):
        $(Q)$(MAKE) $(modbuiltin)=$@
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 51ca0244fc8ac4f8e2981fb9dbc9df3efe507861..ff5ca9817a85ab394740c7ec8f8459f02a9656f9 100644 (file)
@@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable so we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index df4174405feb331a772abe871046d9260c43c690..dd92dbbbaa687b73f31b922187c0da15f66266e0 100644 (file)
@@ -149,8 +149,4 @@ ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 171483bc0538d7faa5e4a34c5804a2f8e721f2ea..da56aa78d245da2835d7714d6bb81e15cb1cf3f4 100644 (file)
@@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index b593b36ccff86910644c63346af8e497346ebab4..38b2b4818e8ebfbe9d19deb0aebef5fdbcc19d28 100644 (file)
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
 
-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
       # -fsanitize=* options makes GCC less smart than usual and
       # increase number of 'maybe-uninitialized false-positives
       CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
index 208eb2825dab017a9d3fdc0bdb8beef053b5626d..6efcead3198989d2ab2ab6772c72d8bb61c89c4e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
 #include <stdio.h>
 int main(void)
 {
index e3b7362b0ee457b9601a8628609d8ebf78fcb09a..447857ffaf6be157841f8b0283d8ac67cb37dc5e 100755 (executable)
@@ -2606,12 +2606,6 @@ sub process {
                             "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
                }
 
-# Check for old stable address
-               if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
-                       ERROR("STABLE_ADDRESS",
-                             "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
-               }
-
 # Check for unwanted Gerrit info
                if ($in_commit_log && $line =~ /^\s*change-id:/i) {
                        ERROR("GERRIT_CHANGE_ID",
@@ -5819,14 +5813,14 @@ sub process {
                    defined $stat &&
                    $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
                    $1 !~ /^_*volatile_*$/) {
-                       my $specifier;
-                       my $extension;
-                       my $bad_specifier = "";
                        my $stat_real;
 
                        my $lc = $stat =~ tr@\n@@;
                        $lc = $lc + $linenr;
                        for (my $count = $linenr; $count <= $lc; $count++) {
+                               my $specifier;
+                               my $extension;
+                               my $bad_specifier = "";
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
 
index 5061abcc25409c4e095e8c5b25d3a8de3fb4db68..e6239f39abadd480f285e4b541b21141337f9497 100755 (executable)
@@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz
 try_decompress 'BZh'          xy    bunzip2
 try_decompress '\135\0\0\0'   xxx   unlzma
 try_decompress '\211\114\132' xy    'lzop -d'
+try_decompress '\002!L\030'   xxx   'lz4 -d'
+try_decompress '(\265/\375'   xxx   unzstd
 
 # Bail out:
 echo "$me: Cannot find vmlinux." >&2
index 3755af0cd9f7f24c1942fd9df216c525f79d04b3..75e4e22b986adcfd07197777c5d59d5601d3c920 100755 (executable)
@@ -1,4 +1,4 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
index 94a383b21df6405f4a9f6b6c08758d6c822381d8..f63b41b0dd498d23b65b3c12fe47e3b2c87e148d 100644 (file)
@@ -171,6 +171,9 @@ struct symbol {
  * config BAZ
  *         int "BAZ Value"
  *         range 1..255
+ *
+ * Please, also check zconf.y:print_symbol() when modifying the
+ * list of property types!
  */
 enum prop_type {
        P_UNKNOWN,
index 65da87fce907ad2bc7b52adba4651dc2c32786be..5ca2df790d3cfa5f4253a33a303219aaa8fc4394 100644 (file)
@@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[])
                nread--;
 
        /* remove trailing new lines */
-       while (buf[nread - 1] == '\n')
+       while (nread > 0 && buf[nread - 1] == '\n')
                nread--;
 
        buf[nread] = 0;
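The added nread > 0 test keeps the trim loop from underrunning the buffer when the command produced nothing but newlines, or nothing at all once the earlier trimming has run. The same idiom as a standalone sketch:

    #include <stdio.h>
    #include <string.h>

    static void trim_trailing_newlines(char *buf, size_t len)
    {
            while (len > 0 && buf[len - 1] == '\n')
                    len--;
            buf[len] = '\0';
    }

    int main(void)
    {
            char s[] = "\n\n";      /* only newlines: len reaches 0 safely */

            trim_trailing_newlines(s, strlen(s));
            printf("[%s]\n", s);    /* prints [] */
            return 0;
    }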
index 6f9b0aa32a82239b2bc1540d1b75949859ea48e9..4b68272ebdb96cb25e8d91de0d4cbc792a3079b6 100644 (file)
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
 static struct menu *current_menu, *current_entry;
 
 %}
-%expect 32
+%expect 31
 
 %union
 {
@@ -337,7 +337,7 @@ choice_block:
 
 /* if entry */
 
-if_entry: T_IF expr nl
+if_entry: T_IF expr T_EOL
 {
        printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
        menu_add_entry(NULL);
@@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu)
                        print_quoted_string(out, prop->text);
                        fputc('\n', out);
                        break;
+               case P_SYMBOL:
+                       fputs( "  symbol ", out);
+                       fprintf(out, "%s\n", prop->sym->name);
+                       break;
                default:
                        fprintf(out, "  unknown prop %d!\n", prop->type);
                        break;
index 66f08bb1cce978a1074141cc90adc7255c317ede..26de7d5aa5c89a5fd4f051c67029341f4f8849fa 100755 (executable)
@@ -152,6 +152,7 @@ regex_asm=(
 )
 regex_c=(
        '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/'
+       '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/'
        '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
@@ -245,7 +246,7 @@ exuberant()
 {
        setup_regex exuberant asm c
        all_target_sources | xargs $1 -a                        \
-       -I __initdata,__exitdata,__initconst,                   \
+       -I __initdata,__exitdata,__initconst,__ro_after_init    \
        -I __initdata_memblock                                  \
        -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
index c4302067a3ad2f3568c7aa831f1402fbc7d0903c..afa91c6f06bb72be64de51ddb8ae584ae09c7f26 100644 (file)
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
 config PAGE_TABLE_ISOLATION
        bool "Remove the kernel mapping in user mode"
        default y
-       depends on X86_64 && !UML
+       depends on X86 && !UML
        help
          This feature reduces the number of hardware side channels by
          ensuring that the majority of kernel addresses are not mapped
index 74f17376202bd1cf36cafcf849abd06fe36ec35a..8b8b70620bbe753e35db3fe5e679334dab8ebb83 100644 (file)
@@ -395,7 +395,7 @@ static int apparmor_inode_getattr(const struct path *path)
        return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
 }
 
-static int apparmor_file_open(struct file *file, const struct cred *cred)
+static int apparmor_file_open(struct file *file)
 {
        struct aa_file_ctx *fctx = file_ctx(file);
        struct aa_label *label;
@@ -414,7 +414,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
                return 0;
        }
 
-       label = aa_get_newest_cred_label(cred);
+       label = aa_get_newest_cred_label(file->f_cred);
        if (!unconfined(label)) {
                struct inode *inode = file_inode(file);
                struct path_cond cond = { inode->i_uid, inode->i_mode };
index 354bb5716ce329a39e43413a9ed373008c901b1e..e4c1a236976cb4650413160d928dabb3b51b28ec 100644 (file)
@@ -238,7 +238,7 @@ int ima_appraise_measurement(enum ima_hooks func,
                             struct integrity_iint_cache *iint,
                             struct file *file, const unsigned char *filename,
                             struct evm_ima_xattr_data *xattr_value,
-                            int xattr_len, int opened);
+                            int xattr_len);
 int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
 void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
 enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -254,7 +254,7 @@ static inline int ima_appraise_measurement(enum ima_hooks func,
                                           struct file *file,
                                           const unsigned char *filename,
                                           struct evm_ima_xattr_data *xattr_value,
-                                          int xattr_len, int opened)
+                                          int xattr_len)
 {
        return INTEGRITY_UNKNOWN;
 }
index 8bd7a0733e5179eca03febd8524a2c08b142cfde..deec1804a00aa488d02b307371cfc18475127bc3 100644 (file)
@@ -212,7 +212,7 @@ int ima_appraise_measurement(enum ima_hooks func,
                             struct integrity_iint_cache *iint,
                             struct file *file, const unsigned char *filename,
                             struct evm_ima_xattr_data *xattr_value,
-                            int xattr_len, int opened)
+                            int xattr_len)
 {
        static const char op[] = "appraise_data";
        const char *cause = "unknown";
@@ -231,7 +231,7 @@ int ima_appraise_measurement(enum ima_hooks func,
                cause = iint->flags & IMA_DIGSIG_REQUIRED ?
                                "IMA-signature-required" : "missing-hash";
                status = INTEGRITY_NOLABEL;
-               if (opened & FILE_CREATED)
+               if (file->f_mode & FMODE_CREATED)
                        iint->flags |= IMA_NEW_FILE;
                if ((iint->flags & IMA_NEW_FILE) &&
                    (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
index dca44cf7838eaddf882212faedc9759a391f7723..b286f37712d506de48285467b55d739e423e881c 100644 (file)
@@ -168,7 +168,7 @@ void ima_file_free(struct file *file)
 
 static int process_measurement(struct file *file, const struct cred *cred,
                               u32 secid, char *buf, loff_t size, int mask,
-                              enum ima_hooks func, int opened)
+                              enum ima_hooks func)
 {
        struct inode *inode = file_inode(file);
        struct integrity_iint_cache *iint = NULL;
@@ -294,7 +294,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
        if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
                inode_lock(inode);
                rc = ima_appraise_measurement(func, iint, file, pathname,
-                                             xattr_value, xattr_len, opened);
+                                             xattr_value, xattr_len);
                inode_unlock(inode);
        }
        if (action & IMA_AUDIT)
@@ -338,7 +338,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
        if (file && (prot & PROT_EXEC)) {
                security_task_getsecid(current, &secid);
                return process_measurement(file, current_cred(), secid, NULL,
-                                          0, MAY_EXEC, MMAP_CHECK, 0);
+                                          0, MAY_EXEC, MMAP_CHECK);
        }
 
        return 0;
@@ -364,13 +364,13 @@ int ima_bprm_check(struct linux_binprm *bprm)
 
        security_task_getsecid(current, &secid);
        ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
-                                 MAY_EXEC, BPRM_CHECK, 0);
+                                 MAY_EXEC, BPRM_CHECK);
        if (ret)
                return ret;
 
        security_cred_getsecid(bprm->cred, &secid);
        return process_measurement(bprm->file, bprm->cred, secid, NULL, 0,
-                                  MAY_EXEC, CREDS_CHECK, 0);
+                                  MAY_EXEC, CREDS_CHECK);
 }
 
 /**
@@ -383,14 +383,14 @@ int ima_bprm_check(struct linux_binprm *bprm)
  * On success return 0.  On integrity appraisal error, assuming the file
  * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
  */
-int ima_file_check(struct file *file, int mask, int opened)
+int ima_file_check(struct file *file, int mask)
 {
        u32 secid;
 
        security_task_getsecid(current, &secid);
        return process_measurement(file, current_cred(), secid, NULL, 0,
                                   mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
-                                          MAY_APPEND), FILE_CHECK, opened);
+                                          MAY_APPEND), FILE_CHECK);
 }
 EXPORT_SYMBOL_GPL(ima_file_check);
 
@@ -493,7 +493,7 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
        func = read_idmap[read_id] ?: FILE_CHECK;
        security_task_getsecid(current, &secid);
        return process_measurement(file, current_cred(), secid, buf, size,
-                                  MAY_READ, func, 0);
+                                  MAY_READ, func);
 }
 
 static int __init init_ima(void)
index f7403821db7f0aafdec4a2e9a6804b1b8c2a599b..b203f7758f9765f056c3e0d07e0286c49b181253 100644 (file)
@@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
  * The src pointer is defined as Z || other info where Z is the shared secret
  * from DH and other info is an arbitrary string (see SP800-56A section
  * 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
                   u8 *dst, unsigned int dlen, unsigned int zlen)
@@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 {
        uint8_t *outbuf = NULL;
        int ret;
-       size_t outbuf_len = round_up(buflen,
-                                    crypto_shash_digestsize(sdesc->shash.tfm));
+       size_t outbuf_len = roundup(buflen,
+                                   crypto_shash_digestsize(sdesc->shash.tfm));
 
        outbuf = kmalloc(outbuf_len, GFP_KERNEL);
        if (!outbuf) {
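The round_up() to roundup() switch is not cosmetic: the kernel's round_up() is a power-of-two mask operation, while roundup() divides and so works for any step. Hash digest sizes such as SHA-1's 20 bytes are not powers of two, so round_up() can return a value that is not a multiple of the digest size at all, violating the requirement documented in the kdf_ctr() comment above. Simplified versions of the two macros make the difference visible:

    #include <stdio.h>

    /* simplified: the real round_up() assumes y is a power of two */
    #define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            printf("round_up(25, 20) = %d\n", round_up(25, 20)); /* 44: not a multiple of 20 */
            printf("roundup(25, 20)  = %d\n", roundup(25, 20));  /* 40: correct */
            return 0;
    }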
index 68f46d849abea30d148dac3942d833c8ef80e114..5dce67070cdf3344b4ce65a228b80e9baff2dee4 100644 (file)
@@ -970,11 +970,11 @@ int security_file_receive(struct file *file)
        return call_int_hook(file_receive, 0, file);
 }
 
-int security_file_open(struct file *file, const struct cred *cred)
+int security_file_open(struct file *file)
 {
        int ret;
 
-       ret = call_int_hook(file_open, 0, file, cred);
+       ret = call_int_hook(file_open, 0, file);
        if (ret)
                return ret;
 
index 2b5ee5fbd652de22a951d7618e6daad3d7cb82f5..18006be157133c3df162fe0ebb42fc350c8f614a 100644 (file)
@@ -3862,7 +3862,7 @@ static int selinux_file_receive(struct file *file)
        return file_has_perm(cred, file, file_to_av(file));
 }
 
-static int selinux_file_open(struct file *file, const struct cred *cred)
+static int selinux_file_open(struct file *file)
 {
        struct file_security_struct *fsec;
        struct inode_security_struct *isec;
@@ -3886,7 +3886,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
         * new inode label or new policy.
         * This check is not redundant - do not remove.
         */
-       return file_path_has_perm(cred, file, open_file_to_av(file));
+       return file_path_has_perm(file->f_cred, file, open_file_to_av(file));
 }
 
 /* task security operations */
index f3d374d2ca045ce7325b20ad3cecb6304418d1b3..79d3709b06717a1f6452fe85b9922244b9f70381 100644 (file)
@@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
 static ssize_t sel_read_policy(struct file *filp, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
        struct policy_load_memory *plm = filp->private_data;
        int ret;
 
-       mutex_lock(&fsi->mutex);
-
        ret = avc_has_perm(&selinux_state,
                           current_sid(), SECINITSID_SECURITY,
                          SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
        if (ret)
-               goto out;
+               return ret;
 
-       ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
-out:
-       mutex_unlock(&fsi->mutex);
-       return ret;
+       return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
 }
 
 static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
@@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
        ret = -EINVAL;
        if (index >= fsi->bool_num || strcmp(name,
                                             fsi->bool_pending_names[index]))
-               goto out;
+               goto out_unlock;
 
        ret = -ENOMEM;
        page = (char *)get_zeroed_page(GFP_KERNEL);
        if (!page)
-               goto out;
+               goto out_unlock;
 
        cur_enforcing = security_get_bool_value(fsi->state, index);
        if (cur_enforcing < 0) {
                ret = cur_enforcing;
-               goto out;
+               goto out_unlock;
        }
        length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
                          fsi->bool_pending_values[index]);
-       ret = simple_read_from_buffer(buf, count, ppos, page, length);
-out:
        mutex_unlock(&fsi->mutex);
+       ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
        free_page((unsigned long)page);
        return ret;
+
+out_unlock:
+       mutex_unlock(&fsi->mutex);
+       goto out_free;
 }
 
 static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
@@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
        unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
                                             fsi->bool_pending_names[index]))
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
@@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        ssize_t length;
        int new_value;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        if (length)
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
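Both write handlers above now stage the user buffer with memdup_user_nul() and do the size/offset sanity checks before taking fsi->mutex. Touching user memory can fault, and servicing the fault may itself need locks, so user accesses inside a locked region are a deadlock risk; it also keeps the failure paths trivial. The shape of the pattern as a userspace analog (a pthread mutex standing in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_value[64];

    static int write_value(const char *user_buf, size_t count)
    {
            char page[64];

            if (count >= sizeof(page))
                    return -1;              /* reject oversized writes up front */

            memcpy(page, user_buf, count);  /* the "user access", no lock held */
            page[count] = '\0';

            pthread_mutex_lock(&lock);
            strcpy(shared_value, page);     /* only shared state under the lock */
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            write_value("1", 1);
            puts(shared_value);
            return 0;
    }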
index 7ad226018f51674b8e97a5d6ff2aaeabd9163bbd..9ab8097dab7c2204b1f478e6660fd83491d83d91 100644 (file)
@@ -1927,9 +1927,9 @@ static int smack_file_receive(struct file *file)
  *
  * Returns 0
  */
-static int smack_file_open(struct file *file, const struct cred *cred)
+static int smack_file_open(struct file *file)
 {
-       struct task_smack *tsp = cred->security;
+       struct task_smack *tsp = file->f_cred->security;
        struct inode *inode = file_inode(file);
        struct smk_audit_info ad;
        int rc;
@@ -1937,7 +1937,7 @@ static int smack_file_open(struct file *file, const struct cred *cred)
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
        smk_ad_setfield_u_fs_path(&ad, file->f_path);
        rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
-       rc = smk_bu_credfile(cred, file, MAY_READ, rc);
+       rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
 
        return rc;
 }
@@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
        struct smack_known *skp = smk_of_task_struct(p);
 
        isp->smk_inode = skp;
+       isp->smk_flags |= SMK_INODE_INSTANT;
 }
 
 /*
index 213b8c593668f260a44e9a6993c42f7a68eb4575..9f932e2d68521c2da50be376cb41c51d700425f4 100644 (file)
@@ -320,7 +320,7 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_file_open(struct file *f, const struct cred *cred)
+static int tomoyo_file_open(struct file *f)
 {
        int flags = f->f_flags;
        /* Don't check read permission here if called from do_execve(). */
index 69616d00481c2cdff6331d8406bb18550db7824d..b53026a72e734e29f63a76eb711458013fa70290 100644 (file)
@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                              struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        
        if (substream->append && substream->use_count > 1)
@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
                runtime->avail = runtime->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                             struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        snd_rawmidi_drain_input(substream);
@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        return 0;
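Both resize paths above replace krealloc() with kmalloc() plus a pointer swap under runtime->lock: krealloc() can free the old buffer while the interrupt-side MIDI code still holds a pointer into it, and the hardware/application positions must be reset atomically with the swap. A userspace analog of the pattern, assuming readers take the same lock around every buffer access:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ring {
            pthread_mutex_t lock;
            char *buffer;
            size_t size, appl_ptr, hw_ptr;
    };

    static int ring_resize(struct ring *r, size_t new_size)
    {
            char *newbuf, *oldbuf;

            newbuf = malloc(new_size);      /* allocate with no lock held */
            if (!newbuf)
                    return -1;

            pthread_mutex_lock(&r->lock);
            oldbuf = r->buffer;
            r->buffer = newbuf;
            r->size = new_size;
            r->appl_ptr = r->hw_ptr = 0;    /* old offsets are meaningless now */
            pthread_mutex_unlock(&r->lock);

            free(oldbuf);                   /* no locked reader can still hold it */
            return 0;
    }

    int main(void)
    {
            struct ring r = { PTHREAD_MUTEX_INITIALIZER, calloc(1, 16), 16, 0, 0 };

            if (ring_resize(&r, 64) == 0)
                    printf("resized to %zu\n", r.size);
            free(r.buffer);
            return 0;
    }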
index 61a07fe34cd271e60dc0c31a7dddae750c2532b1..56ca78423040f09e6d0569b651ba631105b8bd02 100644 (file)
@@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
        struct snd_seq_client *cptr = NULL;
 
        /* search for next client */
-       info->client++;
+       if (info->client < INT_MAX)
+               info->client++;
        if (info->client < 0)
                info->client = 0;
        for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
index 665089c455603c0c683144c419981f8215780e85..b6f076bbc72d14be37893e20b19cbfaedf2f728b 100644 (file)
@@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
                                } else {
                                        if (id.subdevice < 0)
                                                id.subdevice = 0;
-                                       else
+                                       else if (id.subdevice < INT_MAX)
                                                id.subdevice++;
                                }
                        }
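This hunk and the sequencer hunk above it guard the same idiom: advancing a next-client or next-subdevice id that userspace seeds with an arbitrary value. At INT_MAX a bare increment is signed overflow, which is undefined behavior in C, so the increment is skipped at the limit and the scan simply finds nothing past it. As a standalone sketch:

    #include <limits.h>
    #include <stdio.h>

    static int next_id(int id)
    {
            if (id < INT_MAX)       /* never execute INT_MAX + 1 */
                    id++;
            return id;
    }

    int main(void)
    {
            printf("%d\n", next_id(INT_MAX));       /* stays at INT_MAX */
            printf("%d\n", next_id(41));            /* 42 */
            return 0;
    }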
index d91c87e41756ea5fceaee73d84e211b9ebba929d..20a171ac4bb2f7cff9715122c4e959c60fc9b485 100644 (file)
@@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
        list_for_each_entry(pcm, &codec->pcm_list_head, list)
                snd_pcm_suspend_all(pcm->pcm);
        state = hda_call_codec_suspend(codec);
-       if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
-           (state & AC_PWRST_CLK_STOP_OK))
+       if (codec->link_down_at_suspend ||
+           (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+            (state & AC_PWRST_CLK_STOP_OK)))
                snd_hdac_codec_link_down(&codec->core);
        snd_hdac_link_power(&codec->core, false);
        return 0;
index 681c360f29f9d628cf4462c9bb7ef92879f27d91..a8b1b31f161c26f739892ea6b52e79ba2ebca291 100644 (file)
@@ -258,6 +258,7 @@ struct hda_codec {
        unsigned int power_save_node:1; /* advanced PM for each widget */
        unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
        unsigned int force_pin_prefix:1; /* Add location prefix */
+       unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
 #ifdef CONFIG_PM
        unsigned long power_on_acct;
        unsigned long power_off_acct;
index 04e949aa01ada5492765cd313608624f3a42c7b9..321e95c409c1427ddd5a56f802101fc9fe248bff 100644 (file)
@@ -991,6 +991,7 @@ struct ca0132_spec {
 enum {
        QUIRK_NONE,
        QUIRK_ALIENWARE,
+       QUIRK_ALIENWARE_M17XR4,
        QUIRK_SBZ,
        QUIRK_R3DI,
 };
@@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = {
 };
 
 static const struct snd_pci_quirk ca0132_quirks[] = {
+       SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
        SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
-       SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
        {}
 };
 
@@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
  * I think this has to do with the pin for rear surround being 0x11,
  * and the center/lfe being 0x10. Usually the pin order is the opposite.
  */
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
        { .channels = 2,
          .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
        { .channels = 4,
@@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
        info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
 
        /* With the DSP enabled, desktops don't use this ADC. */
-       if (spec->use_alt_functions) {
+       if (!spec->use_alt_functions) {
                info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
                if (!info)
                        return -ENOMEM;
@@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
         * Bit   6: set to select Data2, clear for Data1
         * Bit   7: set to enable DMic, clear for AMic
         */
-       val = 0x23;
+       if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+               val = 0x33;
+       else
+               val = 0x23;
        /* keep a copy of dmic ctl val for enable/disable dmic purpuse */
        spec->dmic_ctl = val;
        snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec)
 
        snd_hda_sequence_write(codec, spec->base_init_verbs);
 
-       if (spec->quirk != QUIRK_NONE)
+       if (spec->use_alt_functions)
                ca0132_alt_init(codec);
 
        ca0132_download_dsp(codec);
@@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec)
        case QUIRK_R3DI:
                r3di_setup_defaults(codec);
                break;
-       case QUIRK_NONE:
-       case QUIRK_ALIENWARE:
+       case QUIRK_SBZ:
+               break;
+       default:
                ca0132_setup_defaults(codec);
                ca0132_init_analog_mic2(codec);
                ca0132_init_dmic(codec);
@@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
 static void ca0132_config(struct hda_codec *codec)
 {
        struct ca0132_spec *spec = codec->spec;
-       struct auto_pin_cfg *cfg = &spec->autocfg;
 
        spec->dacs[0] = 0x2;
        spec->dacs[1] = 0x3;
@@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        case QUIRK_R3DI:
                codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                break;
        default:
                spec->num_outputs = 2;
@@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        }
 }
@@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec)
 static int ca0132_prepare_verbs(struct hda_codec *codec)
 {
 /* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
        struct ca0132_spec *spec = codec->spec;
 
        spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
        if (!spec->spec_init_verbs)
                return -ENOMEM;
 
-       /* HP jack autodetection */
-       spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
-       spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
-       /* MIC1 jack autodetection */
-       spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
-       spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
        /* config EAPD */
-       spec->spec_init_verbs[2].nid = 0x0b;
-       spec->spec_init_verbs[2].param = 0x78D;
-       spec->spec_init_verbs[2].verb = 0x00;
+       spec->spec_init_verbs[0].nid = 0x0b;
+       spec->spec_init_verbs[0].param = 0x78D;
+       spec->spec_init_verbs[0].verb = 0x00;
 
        /* Previously commented configuration */
        /*
-       spec->spec_init_verbs[3].nid = 0x0b;
-       spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].nid = 0x0b;
+       spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].verb = 0x02;
+
+       spec->spec_init_verbs[3].nid = 0x10;
+       spec->spec_init_verbs[3].param = 0x78D;
        spec->spec_init_verbs[3].verb = 0x02;
 
        spec->spec_init_verbs[4].nid = 0x10;
-       spec->spec_init_verbs[4].param = 0x78D;
+       spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
        spec->spec_init_verbs[4].verb = 0x02;
-
-       spec->spec_init_verbs[5].nid = 0x10;
-       spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
-       spec->spec_init_verbs[5].verb = 0x02;
        */
 
        /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
index e7fcfc3b8885fb7470dc1b10a49f305f7bca323d..f641c20095f71bb93edef945be21cbc141de280f 100644 (file)
@@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 8840daf9c6a300899efaf02898430d158d5b972a..8a49415aebacb79cd3da6b90b1a34e041da02bbd 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/asoundef.h>
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
 
        if (pin_idx < 0)
                return;
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
                snd_hda_jack_report_sync(codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void jack_callback(struct hda_codec *codec,
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
        struct hda_codec *codec = per_pin->codec;
-       struct hdmi_spec *spec = codec->spec;
        int ret;
 
        /* no temporary power up/down needed for component notifier */
-       if (!codec_has_acomp(codec))
-               snd_hda_power_up_pm(codec);
+       if (!codec_has_acomp(codec)) {
+               ret = snd_hda_power_up_pm(codec);
+               if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+                       snd_hda_power_down_pm(codec);
+                       return false;
+               }
+       }
 
-       mutex_lock(&spec->pcm_lock);
        if (codec_has_acomp(codec)) {
                sync_eld_via_acomp(codec, per_pin);
                ret = false; /* don't call snd_hda_jack_report_sync() */
        } else {
                ret = hdmi_present_sense_via_verbs(per_pin, repoll);
        }
-       mutex_unlock(&spec->pcm_lock);
 
        if (!codec_has_acomp(codec))
                snd_hda_power_down_pm(codec);
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work)
 {
        struct hdmi_spec_per_pin *per_pin =
        container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+       struct hda_codec *codec = per_pin->codec;
+       struct hdmi_spec *spec = codec->spec;
 
        if (per_pin->repoll_count++ > 6)
                per_pin->repoll_count = 0;
 
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(per_pin, per_pin->repoll_count))
                snd_hda_jack_report_sync(per_pin->codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
@@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec)
 
        spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
 
+       /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing
+        * the link-down as is.  Tell the core to allow it.
+        */
+       codec->link_down_at_suspend = 1;
+
        return 0;
 }
 
index e9bd33ea538f239891c031a1a81e075a35c75043..f6af3e1c2b932d34c1de567b4229b0eb686af637 100644 (file)
@@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2545,6 +2546,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
        SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+       SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4997,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->shutup = alc_no_shutup; /* reduce click noise */
                spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5395,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+       hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
 /* for dell wmi mic mute led */
 #include "dell_wmi_helper.c"
 
@@ -5946,7 +5954,7 @@ static const struct hda_fixup alc269_fixups[] = {
        },
        [ALC269_FIXUP_THINKPAD_ACPI] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = hda_fixup_thinkpad_acpi,
+               .v.func = alc_fixup_thinkpad_acpi,
                .chained = true,
                .chain_id = ALC269_FIXUP_SKU_IGNORE,
        },
@@ -6562,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+       SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
@@ -6603,8 +6612,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
-       SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6791,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x19, 0x02a11030},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11030},
+               {0x1a, 0x02a11040},
+               {0x1b, 0x01014020},
+               {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11020},
+               {0x1a, 0x02a11030},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60140},
                {0x14, 0x90170110},
index 6c85f13ab23f17f7ef4031f5f73797fb383584a7..54f6252faca684b23ef91617318c8ae11e3de04e 100644 (file)
@@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card,
        chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
        if (!chip->port_dsp_bar) {
                dev_err(card->dev, "cannot remap PCI memory region\n");
+               err = -ENOMEM;
                goto remap_pci_failed;
        }
 
index caae4843cb7001fbee1fa9b222850df7006850fb..16e006f708ca0cbd44a63135bb996b8db7c3ba9e 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 04b3256f8e6d5f8e3e368b043f0fdcfeb7c23164..4e76630dd6554673d71ad647c1108bb54f1bcea2 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..5072cbd
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __ARCH_WANT_RENAMEAT
+
+#include <asm-generic/unistd.h>
index fc0df353ff0da5f8bd6deb8d69f6aa55e631dc7a..87245c584784ec1f0f877fbe0be54ee136df5456 100644 (file)
 #define        ELOOP           249     /* Too many symbolic links encountered */
 #define        ENOSYS          251     /* Function not implemented */
 
-#define ENOTSUP                252     /* Function not implemented (POSIX.4 / HPUX) */
 #define ECANCELLED     253     /* aio request was canceled before complete (POSIX.4 / HPUX) */
 #define ECANCELED      ECANCELLED      /* SuSv3 and Solaris want one 'L' */
 
index 833ed9a16adfd03e0b6cb70adc19fe03055f7344..1b32b56a03d34ce2a5f0b7f79c621f87d8c89dbf 100644 (file)
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
index 389c36fd82990f3f6b390342f56375ac0067054a..985534d0b448b7ae7b9d4cad7c3f9257d4ce0789 100644 (file)
 #define __NR_pkey_alloc                384
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
+#define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index fb00a2fca9901eb02ea7b730ddbac957e8ecc947..5701f5cecd3125fbce64ead21d89d02fc8fa25af 100644 (file)
 #define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD           (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h
new file mode 100644 (file)
index 0000000..2ccd588
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MCSAFE_TEST_H_
+#define _MCSAFE_TEST_H_
+
+.macro MCSAFE_TEST_CTL
+.endm
+
+.macro MCSAFE_TEST_SRC reg count target
+.endm
+
+.macro MCSAFE_TEST_DST reg count target
+.endm
+#endif /* _MCSAFE_TEST_H_ */
index 9a53a06e5a3efcb62f9563a6161fd98bbc22d617..298ef1479240b6b899fb4185a5204d5ad56b8785 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
+#include <asm/mcsafe_test.h>
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
 ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
+
+MCSAFE_TEST_CTL
+
 /*
- * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
+ * __memcpy_mcsafe - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe_unrolled)
+ENTRY(__memcpy_mcsafe)
        cmpl $8, %edx
        /* Less than 8 bytes? Go to byte copy loop */
        jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
        subl $8, %ecx
        negl %ecx
        subl %ecx, %edx
-.L_copy_leading_bytes:
+.L_read_leading_bytes:
        movb (%rsi), %al
+       MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+       MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
+.L_write_leading_bytes:
        movb %al, (%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
-       jnz .L_copy_leading_bytes
+       jnz .L_read_leading_bytes
 
 .L_8byte_aligned:
-       /* Figure out how many whole cache lines (64-bytes) to copy */
-       movl %edx, %ecx
-       andl $63, %edx
-       shrl $6, %ecx
-       jz .L_no_whole_cache_lines
-
-       /* Loop copying whole cache lines */
-.L_cache_w0: movq (%rsi), %r8
-.L_cache_w1: movq 1*8(%rsi), %r9
-.L_cache_w2: movq 2*8(%rsi), %r10
-.L_cache_w3: movq 3*8(%rsi), %r11
-       movq %r8, (%rdi)
-       movq %r9, 1*8(%rdi)
-       movq %r10, 2*8(%rdi)
-       movq %r11, 3*8(%rdi)
-.L_cache_w4: movq 4*8(%rsi), %r8
-.L_cache_w5: movq 5*8(%rsi), %r9
-.L_cache_w6: movq 6*8(%rsi), %r10
-.L_cache_w7: movq 7*8(%rsi), %r11
-       movq %r8, 4*8(%rdi)
-       movq %r9, 5*8(%rdi)
-       movq %r10, 6*8(%rdi)
-       movq %r11, 7*8(%rdi)
-       leaq 64(%rsi), %rsi
-       leaq 64(%rdi), %rdi
-       decl %ecx
-       jnz .L_cache_w0
-
-       /* Are there any trailing 8-byte words? */
-.L_no_whole_cache_lines:
        movl %edx, %ecx
        andl $7, %edx
        shrl $3, %ecx
        jz .L_no_whole_words
 
-       /* Copy trailing words */
-.L_copy_trailing_words:
+.L_read_words:
        movq (%rsi), %r8
-       mov %r8, (%rdi)
-       leaq 8(%rsi), %rsi
-       leaq 8(%rdi), %rdi
+       MCSAFE_TEST_SRC %rsi 8 .E_read_words
+       MCSAFE_TEST_DST %rdi 8 .E_write_words
+.L_write_words:
+       movq %r8, (%rdi)
+       addq $8, %rsi
+       addq $8, %rdi
        decl %ecx
-       jnz .L_copy_trailing_words
+       jnz .L_read_words
 
        /* Any trailing bytes? */
 .L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
 
        /* Copy trailing bytes */
        movl %edx, %ecx
-.L_copy_trailing_bytes:
+.L_read_trailing_bytes:
        movb (%rsi), %al
+       MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+       MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
+.L_write_trailing_bytes:
        movb %al, (%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
-       jnz .L_copy_trailing_bytes
+       jnz .L_read_trailing_bytes
 
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
        xorq %rax, %rax
        ret
-ENDPROC(memcpy_mcsafe_unrolled)
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
+ENDPROC(__memcpy_mcsafe)
+EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
        .section .fixup, "ax"
-       /* Return -EFAULT for any failure */
-.L_memcpy_mcsafe_fail:
-       mov     $-EFAULT, %rax
+       /*
+        * Return number of bytes not copied for any failure. Note that
+        * there is no "tail" handling since the source buffer is 8-byte
+        * aligned and poison is cacheline aligned.
+        */
+.E_read_words:
+       shll    $3, %ecx
+.E_leading_bytes:
+       addl    %edx, %ecx
+.E_trailing_bytes:
+       mov     %ecx, %eax
        ret
 
+       /*
+        * For write fault handling, given the destination is unaligned,
+        * we handle faults on multi-byte writes with a byte-by-byte
+        * copy up to the write-protected page.
+        */
+.E_write_words:
+       shll    $3, %ecx
+       addl    %edx, %ecx
+       movl    %ecx, %edx
+       jmp mcsafe_handle_tail
+
        .previous
 
-       _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+       _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+       _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+       _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+       _ASM_EXTABLE(.L_write_words, .E_write_words)
+       _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
 #endif
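
To make the new fixup arithmetic concrete: a fault in the word loop leaves %ecx whole words plus %edx trailing bytes uncopied, which is exactly what the shll $3 / addl %edx sequence computes. A standalone C check of that computation (values are illustrative):

    #include <assert.h>

    int main(void)
    {
            unsigned int words_left = 5, tail_bytes = 3;  /* example fault state */
            /* mirrors .E_read_words: shll $3, %ecx; addl %edx, %ecx */
            unsigned int not_copied = (words_left << 3) + tail_bytes;
            assert(not_copied == 43);  /* 5*8 + 3 bytes were not copied */
            return 0;
    }
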
index 32f9e397a6c07a988edd80a3338fd2a29930d3e0..3f140eff039fc699f99a30f231211120fa88ed53 100644 (file)
@@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        int err;
        int fd;
 
+       if (argc < 3) {
+               p_err("too few arguments, id ID and FILE path are required");
+               return -1;
+       } else if (argc > 3) {
+               p_err("too many arguments");
+               return -1;
+       }
+
        if (!is_prefix(*argv, "id")) {
                p_err("expected 'id' got %s", *argv);
                return -1;
@@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        }
        NEXT_ARG();
 
-       if (argc != 1)
-               usage();
-
        fd = get_fd_by_id(id);
        if (fd < 0) {
                p_err("can't get prog by id (%u): %s", id, strerror(errno));
index 097b1a5e046b20f8ced90a469c2b8e5ab25bb593..f74a8bcbda874a8cfa0595257e1002e047f5559a 100644 (file)
@@ -36,6 +36,7 @@
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/kernel.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -90,7 +91,8 @@ static bool map_is_map_of_progs(__u32 type)
 static void *alloc_value(struct bpf_map_info *info)
 {
        if (map_is_per_cpu(info->type))
-               return malloc(info->value_size * get_possible_cpus());
+               return malloc(round_up(info->value_size, 8) *
+                             get_possible_cpus());
        else
                return malloc(info->value_size);
 }
@@ -161,9 +163,10 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
                jsonw_name(json_wtr, "value");
                print_hex_data_json(value, info->value_size);
        } else {
-               unsigned int i, n;
+               unsigned int i, n, step;
 
                n = get_possible_cpus();
+               step = round_up(info->value_size, 8);
 
                jsonw_name(json_wtr, "key");
                print_hex_data_json(key, info->key_size);
@@ -176,7 +179,7 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
                        jsonw_int_field(json_wtr, "cpu", i);
 
                        jsonw_name(json_wtr, "value");
-                       print_hex_data_json(value + i * info->value_size,
+                       print_hex_data_json(value + i * step,
                                            info->value_size);
 
                        jsonw_end_object(json_wtr);
@@ -207,9 +210,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
 
                printf("\n");
        } else {
-               unsigned int i, n;
+               unsigned int i, n, step;
 
                n = get_possible_cpus();
+               step = round_up(info->value_size, 8);
 
                printf("key:\n");
                fprint_hex(stdout, key, info->key_size, " ");
@@ -217,7 +221,7 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
                for (i = 0; i < n; i++) {
                        printf("value (CPU %02d):%c",
                               i, info->value_size > 16 ? '\n' : ' ');
-                       fprint_hex(stdout, value + i * info->value_size,
+                       fprint_hex(stdout, value + i * step,
                                   info->value_size, " ");
                        printf("\n");
                }
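
The round_up matters because the kernel hands back per-CPU map values at an 8-byte-aligned stride, so a 6-byte value_size still occupies 8 bytes per CPU. A sketch of the indexing this change implements (round_up spelled out for a power-of-two alignment, in the spirit of linux/kernel.h):

    #include <stddef.h>
    #include <stdint.h>

    #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

    /* per-CPU values are packed at an 8-byte-aligned stride */
    static void *value_for_cpu(void *base, uint32_t value_size, unsigned int cpu)
    {
            size_t step = round_up((size_t)value_size, (size_t)8);
            return (char *)base + cpu * step;
    }
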
index ac6b1a12c9b7cd6319dc3697500d56c938de8d90..b76b77dcfd1fcc52ded0a99b7e7f3cec8cde90ce 100644 (file)
@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
        if (perf_query_supported)
                goto out;
 
-       fd = open(bin_name, O_RDONLY);
+       fd = open("/", O_RDONLY);
        if (fd < 0) {
-               p_err("perf_query_support: %s", strerror(errno));
+               p_err("perf_query_support: cannot open directory \"/\" (%s)",
+                     strerror(errno));
                goto out;
        }
 
index a4f435203feff52f9d7c9a04bf8d5c10c31d2c73..959aa53ab6789f839442326359701b17ba9e337c 100644 (file)
@@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
        }
 
        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
-               nsecs / 1000000000;
+               (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+               1000000000;
+
 
        if (!localtime_r(&wallclock_secs, &load_tm)) {
                snprintf(buf, size, "%llu", nsecs / 1000000000);
@@ -692,15 +694,19 @@ static int do_load(int argc, char **argv)
                return -1;
        }
 
-       if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program");
-               return -1;
-       }
+       if (do_pin_fd(prog_fd, argv[1]))
+               goto err_close_obj;
 
        if (json_output)
                jsonw_null(json_wtr);
 
+       bpf_object__close(obj);
+
        return 0;
+
+err_close_obj:
+       bpf_object__close(obj);
+       return -1;
 }
 
 static int do_help(int argc, char **argv)
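
The load-time fix matters when the two clocks' nanosecond fields straddle a second boundary; the old formula dropped the sub-second difference between boot_time_ts and real_time_ts entirely. An illustrative check with made-up readings:

    #include <assert.h>

    int main(void)
    {
            long real_sec = 200, real_nsec = 100000000L;   /* example readings */
            long boot_sec = 100, boot_nsec = 900000000L;
            long nsecs = 1500000000L;                      /* load time since boot */

            long secs = (real_sec - boot_sec) +
                        (real_nsec - boot_nsec + nsecs) / 1000000000L;
            assert(secs == 100);  /* the old formula yielded 101, a second off */
            return 0;
    }
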
index a4bbb984941df2c150ec7209e4905ac1baacbf81..950c1504ca37ecda43542a46730fe805a0f05866 100644 (file)
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
            $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
            rm -f $(depfile);                                                    \
            mv -f $(dot-target).tmp $(dot-target).cmd,                           \
-           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
-           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd;           \
            cat $(depfile) >> $(dot-target).cmd;                                 \
            printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
 
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
 ###
 ## HOSTCC C flags
 
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
index 5eb4b5ad79cb778f0e949a07f719743d7a62d3c7..5edf65e684ab70bb65bfd0e8dc821a61b605be6f 100644 (file)
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
        $(Q)$(MAKE) $(build)=fixdep
 
 $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
-       $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
+       $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
 
 FORCE:
 
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
new file mode 100644 (file)
index 0000000..4299067
--- /dev/null
@@ -0,0 +1,783 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm/bitsperlong.h>
+
+/*
+ * This file contains the system call numbers, based on the
+ * layout of the x86-64 architecture, which embeds the
+ * pointer to the syscall in the table.
+ *
+ * As a basic principle, no duplication of functionality
+ * should be added, e.g. we don't use lseek when llseek
+ * is present. New architectures should use this file
+ * and implement the less feature-full calls in user space.
+ */
+
+#ifndef __SYSCALL
+#define __SYSCALL(x, y)
+#endif
+
+#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
+#else
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
+#endif
+
+#ifdef __SYSCALL_COMPAT
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
+#else
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
+#endif
+
+#define __NR_io_setup 0
+__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 1
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_submit 2
+__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 3
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_io_getevents 4
+__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+
+/* fs/xattr.c */
+#define __NR_setxattr 5
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 6
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 7
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 8
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 9
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 10
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 11
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 12
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 13
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 14
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 15
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 16
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+
+/* fs/dcache.c */
+#define __NR_getcwd 17
+__SYSCALL(__NR_getcwd, sys_getcwd)
+
+/* fs/cookies.c */
+#define __NR_lookup_dcookie 18
+__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
+
+/* fs/eventfd.c */
+#define __NR_eventfd2 19
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+
+/* fs/eventpoll.c */
+#define __NR_epoll_create1 20
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_epoll_ctl 21
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_pwait 22
+__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
+
+/* fs/fcntl.c */
+#define __NR_dup 23
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_dup3 24
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR3264_fcntl 25
+__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
+
+/* fs/inotify_user.c */
+#define __NR_inotify_init1 26
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_inotify_add_watch 27
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 28
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+
+/* fs/ioctl.c */
+#define __NR_ioctl 29
+__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
+
+/* fs/ioprio.c */
+#define __NR_ioprio_set 30
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 31
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+
+/* fs/locks.c */
+#define __NR_flock 32
+__SYSCALL(__NR_flock, sys_flock)
+
+/* fs/namei.c */
+#define __NR_mknodat 33
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_mkdirat 34
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_unlinkat 35
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_symlinkat 36
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_linkat 37
+__SYSCALL(__NR_linkat, sys_linkat)
+#ifdef __ARCH_WANT_RENAMEAT
+/* renameat is superseded with flags by renameat2 */
+#define __NR_renameat 38
+__SYSCALL(__NR_renameat, sys_renameat)
+#endif /* __ARCH_WANT_RENAMEAT */
+
+/* fs/namespace.c */
+#define __NR_umount2 39
+__SYSCALL(__NR_umount2, sys_umount)
+#define __NR_mount 40
+__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+#define __NR_pivot_root 41
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+
+/* fs/nfsctl.c */
+#define __NR_nfsservctl 42
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+
+/* fs/open.c */
+#define __NR3264_statfs 43
+__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
+              compat_sys_statfs64)
+#define __NR3264_fstatfs 44
+__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
+              compat_sys_fstatfs64)
+#define __NR3264_truncate 45
+__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
+              compat_sys_truncate64)
+#define __NR3264_ftruncate 46
+__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
+              compat_sys_ftruncate64)
+
+#define __NR_fallocate 47
+__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
+#define __NR_faccessat 48
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_chdir 49
+__SYSCALL(__NR_chdir, sys_chdir)
+#define __NR_fchdir 50
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_chroot 51
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_fchmod 52
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchmodat 53
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_fchownat 54
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_fchown 55
+__SYSCALL(__NR_fchown, sys_fchown)
+#define __NR_openat 56
+__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+#define __NR_close 57
+__SYSCALL(__NR_close, sys_close)
+#define __NR_vhangup 58
+__SYSCALL(__NR_vhangup, sys_vhangup)
+
+/* fs/pipe.c */
+#define __NR_pipe2 59
+__SYSCALL(__NR_pipe2, sys_pipe2)
+
+/* fs/quota.c */
+#define __NR_quotactl 60
+__SYSCALL(__NR_quotactl, sys_quotactl)
+
+/* fs/readdir.c */
+#define __NR_getdents64 61
+__SYSCALL(__NR_getdents64, sys_getdents64)
+
+/* fs/read_write.c */
+#define __NR3264_lseek 62
+__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
+#define __NR_read 63
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 64
+__SYSCALL(__NR_write, sys_write)
+#define __NR_readv 65
+__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+#define __NR_writev 66
+__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+#define __NR_pread64 67
+__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
+#define __NR_pwrite64 68
+__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
+#define __NR_preadv 69
+__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
+#define __NR_pwritev 70
+__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
+
+/* fs/sendfile.c */
+#define __NR3264_sendfile 71
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
+
+/* fs/select.c */
+#define __NR_pselect6 72
+__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 73
+__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+
+/* fs/signalfd.c */
+#define __NR_signalfd4 74
+__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
+
+/* fs/splice.c */
+#define __NR_vmsplice 75
+__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+#define __NR_splice 76
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee 77
+__SYSCALL(__NR_tee, sys_tee)
+
+/* fs/stat.c */
+#define __NR_readlinkat 78
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR3264_fstatat 79
+__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
+#define __NR3264_fstat 80
+__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+
+/* fs/sync.c */
+#define __NR_sync 81
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_fsync 82
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_fdatasync 83
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
+#define __NR_sync_file_range2 84
+__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
+         compat_sys_sync_file_range2)
+#else
+#define __NR_sync_file_range 84
+__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
+         compat_sys_sync_file_range)
+#endif
+
+/* fs/timerfd.c */
+#define __NR_timerfd_create 85
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_timerfd_settime 86
+__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
+         compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 87
+__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
+         compat_sys_timerfd_gettime)
+
+/* fs/utimes.c */
+#define __NR_utimensat 88
+__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+
+/* kernel/acct.c */
+#define __NR_acct 89
+__SYSCALL(__NR_acct, sys_acct)
+
+/* kernel/capability.c */
+#define __NR_capget 90
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 91
+__SYSCALL(__NR_capset, sys_capset)
+
+/* kernel/exec_domain.c */
+#define __NR_personality 92
+__SYSCALL(__NR_personality, sys_personality)
+
+/* kernel/exit.c */
+#define __NR_exit 93
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_exit_group 94
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_waitid 95
+__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
+
+/* kernel/fork.c */
+#define __NR_set_tid_address 96
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_unshare 97
+__SYSCALL(__NR_unshare, sys_unshare)
+
+/* kernel/futex.c */
+#define __NR_futex 98
+__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+#define __NR_set_robust_list 99
+__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
+         compat_sys_set_robust_list)
+#define __NR_get_robust_list 100
+__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
+         compat_sys_get_robust_list)
+
+/* kernel/hrtimer.c */
+#define __NR_nanosleep 101
+__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+
+/* kernel/itimer.c */
+#define __NR_getitimer 102
+__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
+#define __NR_setitimer 103
+__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
+
+/* kernel/kexec.c */
+#define __NR_kexec_load 104
+__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
+
+/* kernel/module.c */
+#define __NR_init_module 105
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 106
+__SYSCALL(__NR_delete_module, sys_delete_module)
+
+/* kernel/posix-timers.c */
+#define __NR_timer_create 107
+__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#define __NR_timer_gettime 108
+__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 109
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_settime 110
+__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_delete 111
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 112
+__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 113
+__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 114
+__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 115
+__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
+         compat_sys_clock_nanosleep)
+
+/* kernel/printk.c */
+#define __NR_syslog 116
+__SYSCALL(__NR_syslog, sys_syslog)
+
+/* kernel/ptrace.c */
+#define __NR_ptrace 117
+__SYSCALL(__NR_ptrace, sys_ptrace)
+
+/* kernel/sched/core.c */
+#define __NR_sched_setparam 118
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_setscheduler 119
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 120
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_getparam 121
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setaffinity 122
+__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
+         compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 123
+__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
+         compat_sys_sched_getaffinity)
+#define __NR_sched_yield 124
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 125
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 126
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 127
+__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
+         compat_sys_sched_rr_get_interval)
+
+/* kernel/signal.c */
+#define __NR_restart_syscall 128
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_kill 129
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_tkill 130
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_tgkill 131
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_sigaltstack 132
+__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
+#define __NR_rt_sigsuspend 133
+__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_rt_sigaction 134
+__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 135
+__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 136
+__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 137
+__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
+         compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 138
+__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
+         compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigreturn 139
+__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
+
+/* kernel/sys.c */
+#define __NR_setpriority 140
+__SYSCALL(__NR_setpriority, sys_setpriority)
+#define __NR_getpriority 141
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_reboot 142
+__SYSCALL(__NR_reboot, sys_reboot)
+#define __NR_setregid 143
+__SYSCALL(__NR_setregid, sys_setregid)
+#define __NR_setgid 144
+__SYSCALL(__NR_setgid, sys_setgid)
+#define __NR_setreuid 145
+__SYSCALL(__NR_setreuid, sys_setreuid)
+#define __NR_setuid 146
+__SYSCALL(__NR_setuid, sys_setuid)
+#define __NR_setresuid 147
+__SYSCALL(__NR_setresuid, sys_setresuid)
+#define __NR_getresuid 148
+__SYSCALL(__NR_getresuid, sys_getresuid)
+#define __NR_setresgid 149
+__SYSCALL(__NR_setresgid, sys_setresgid)
+#define __NR_getresgid 150
+__SYSCALL(__NR_getresgid, sys_getresgid)
+#define __NR_setfsuid 151
+__SYSCALL(__NR_setfsuid, sys_setfsuid)
+#define __NR_setfsgid 152
+__SYSCALL(__NR_setfsgid, sys_setfsgid)
+#define __NR_times 153
+__SC_COMP(__NR_times, sys_times, compat_sys_times)
+#define __NR_setpgid 154
+__SYSCALL(__NR_setpgid, sys_setpgid)
+#define __NR_getpgid 155
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_getsid 156
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_setsid 157
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_getgroups 158
+__SYSCALL(__NR_getgroups, sys_getgroups)
+#define __NR_setgroups 159
+__SYSCALL(__NR_setgroups, sys_setgroups)
+#define __NR_uname 160
+__SYSCALL(__NR_uname, sys_newuname)
+#define __NR_sethostname 161
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setdomainname 162
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_getrlimit 163
+__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
+#define __NR_setrlimit 164
+__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#define __NR_getrusage 165
+__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
+#define __NR_umask 166
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_prctl 167
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_getcpu 168
+__SYSCALL(__NR_getcpu, sys_getcpu)
+
+/* kernel/time.c */
+#define __NR_gettimeofday 169
+__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 170
+__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
+#define __NR_adjtimex 171
+__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+
+/* kernel/timer.c */
+#define __NR_getpid 172
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_getppid 173
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getuid 174
+__SYSCALL(__NR_getuid, sys_getuid)
+#define __NR_geteuid 175
+__SYSCALL(__NR_geteuid, sys_geteuid)
+#define __NR_getgid 176
+__SYSCALL(__NR_getgid, sys_getgid)
+#define __NR_getegid 177
+__SYSCALL(__NR_getegid, sys_getegid)
+#define __NR_gettid 178
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_sysinfo 179
+__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
+
+/* ipc/mqueue.c */
+#define __NR_mq_open 180
+__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 181
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 182
+__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 183
+__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
+         compat_sys_mq_timedreceive)
+#define __NR_mq_notify 184
+__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 185
+__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
+
+/* ipc/msg.c */
+#define __NR_msgget 186
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 187
+__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
+#define __NR_msgrcv 188
+__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
+#define __NR_msgsnd 189
+__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
+
+/* ipc/sem.c */
+#define __NR_semget 190
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 191
+__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#define __NR_semtimedop 192
+__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+#define __NR_semop 193
+__SYSCALL(__NR_semop, sys_semop)
+
+/* ipc/shm.c */
+#define __NR_shmget 194
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 195
+__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
+#define __NR_shmat 196
+__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
+#define __NR_shmdt 197
+__SYSCALL(__NR_shmdt, sys_shmdt)
+
+/* net/socket.c */
+#define __NR_socket 198
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_socketpair 199
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_bind 200
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_listen 201
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 202
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_connect 203
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_getsockname 204
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 205
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_sendto 206
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recvfrom 207
+__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
+#define __NR_setsockopt 208
+__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 209
+__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+#define __NR_shutdown 210
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_sendmsg 211
+__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 212
+__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
+
+/* mm/filemap.c */
+#define __NR_readahead 213
+__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
+
+/* mm/nommu.c, also with MMU */
+#define __NR_brk 214
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_munmap 215
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_mremap 216
+__SYSCALL(__NR_mremap, sys_mremap)
+
+/* security/keys/keyctl.c */
+#define __NR_add_key 217
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 218
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 219
+__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
+
+/* arch/example/kernel/sys_example.c */
+#define __NR_clone 220
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_execve 221
+__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
+
+#define __NR3264_mmap 222
+__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
+/* mm/fadvise.c */
+#define __NR3264_fadvise64 223
+__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
+
+/* mm/, CONFIG_MMU only */
+#ifndef __ARCH_NOMMU
+#define __NR_swapon 224
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_swapoff 225
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_mprotect 226
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_msync 227
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_mlock 228
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 229
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 230
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 231
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_mincore 232
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 233
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_remap_file_pages 234
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+#define __NR_mbind 235
+__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 236
+__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 237
+__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_migrate_pages 238
+__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+#define __NR_move_pages 239
+__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+#endif
+
+#define __NR_rt_tgsigqueueinfo 240
+__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
+         compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_accept4 242
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_recvmmsg 243
+__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+
+/*
+ * Architectures may provide up to 16 syscalls of their own
+ * starting with this value.
+ */
+#define __NR_arch_specific_syscall 244
+
+#define __NR_wait4 260
+__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#define __NR_prlimit64 261
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_fanotify_init 262
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 263
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at         264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at         265
+__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
+         compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 266
+__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 267
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_sendmmsg 269
+__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 273
+__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
+#define __NR_seccomp 277
+__SYSCALL(__NR_seccomp, sys_seccomp)
+#define __NR_getrandom 278
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 279
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 280
+__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 281
+__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 282
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 283
+__SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 284
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 285
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 288
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 289
+__SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
+#define __NR_pkey_free 290
+__SYSCALL(__NR_pkey_free,     sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx,     sys_statx)
+#define __NR_io_pgetevents 292
+__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+
+#undef __NR_syscalls
+#define __NR_syscalls 293
+
+/*
+ * 32 bit systems traditionally used different
+ * syscalls for off_t and loff_t arguments, while
+ * 64 bit systems only need the off_t version.
+ * For new 32 bit platforms, there is no need to
+ * implement the old 32 bit off_t syscalls, so
+ * they take different names.
+ * Here we map the numbers so that both versions
+ * use the same syscall table layout.
+ */
+#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
+#define __NR_fcntl __NR3264_fcntl
+#define __NR_statfs __NR3264_statfs
+#define __NR_fstatfs __NR3264_fstatfs
+#define __NR_truncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_ftruncate
+#define __NR_lseek __NR3264_lseek
+#define __NR_sendfile __NR3264_sendfile
+#define __NR_newfstatat __NR3264_fstatat
+#define __NR_fstat __NR3264_fstat
+#define __NR_mmap __NR3264_mmap
+#define __NR_fadvise64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat __NR3264_stat
+#define __NR_lstat __NR3264_lstat
+#endif
+#else
+#define __NR_fcntl64 __NR3264_fcntl
+#define __NR_statfs64 __NR3264_statfs
+#define __NR_fstatfs64 __NR3264_fstatfs
+#define __NR_truncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_ftruncate
+#define __NR_llseek __NR3264_lseek
+#define __NR_sendfile64 __NR3264_sendfile
+#define __NR_fstatat64 __NR3264_fstatat
+#define __NR_fstat64 __NR3264_fstat
+#define __NR_mmap2 __NR3264_mmap
+#define __NR_fadvise64_64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat64 __NR3264_stat
+#define __NR_lstat64 __NR3264_lstat
+#endif
+#endif
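
For reference, architectures consume this header by redefining __SYSCALL and re-including it; a simplified sketch in the style of the arm64 syscall table (the real code adds per-arch wrapper types and declarations):

    /* sketch: building a syscall table from the numbered list above */
    #undef __SYSCALL
    #define __SYSCALL(nr, sym)  [nr] = (void *)(sym),

    void *sys_call_table[__NR_syscalls] = {
            [0 ... __NR_syscalls - 1] = (void *)sys_ni_syscall,
    #include <asm/unistd.h>
    };
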
index 6fdff5945c8a08f27af713f6b59cb27b315da447..9c660e1688abe1cd6bf0e22bf709515e8a463e0d 100644 (file)
@@ -680,6 +680,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC  3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
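
Userspace must opt in per-client before aspect ratio data shows up in fetched modes; with libdrm that is a single call (sketch, error handling elided):

    #include <xf86drm.h>

    /* ask the DRM core to include aspect ratio info in reported modes */
    static int enable_aspect_ratio(int drm_fd)
    {
            return drmSetClientCap(drm_fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
    }
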
index e0b06784f2279d9f428da4a0bce526721a452570..b7db3261c62d124760e98d9c851c1b01e64bdb03 100644 (file)
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,12 +2639,16 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
                __u8    tos;            /* AF_INET  */
-               __be32  flowlabel;      /* AF_INET6 */
+               __be32  flowinfo;       /* AF_INET6, flow_label + priority */
 
                /* output: metric of fib result (IPv4/IPv6 only) */
                __u32   rt_metric;
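
Putting the new return convention and the input/output ifindex together, a trimmed XDP sketch (headers, SEC() boilerplate and packet parsing assumed; the smac/dmac rewrite is omitted):

    SEC("xdp")
    int xdp_fwd(struct xdp_md *ctx)
    {
            struct bpf_fib_lookup params = {};
            int rc;

            params.family  = AF_INET;
            params.ifindex = ctx->ingress_ifindex;
            /* ... fill tot_len, tos and the addresses from the packet ... */

            rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
            if (rc == BPF_FIB_LKUP_RET_SUCCESS)
                    return bpf_redirect(params.ifindex, 0); /* output ifindex */
            if (rc < 0)
                    return XDP_ABORTED;  /* invalid input arguments */
            return XDP_PASS;             /* rc > 0: needs the full stack */
    }
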
index 0b5ddbe135a47aa7f39b40ca44e665e5757014de..972265f328717b8286edc2fc93c9d2a54ed394f8 100644 (file)
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)    (((VAL) & 0x00ff0000) >> 16)
-#define BTF_INT_BITS(VAL)      ((VAL)  & 0x0000ffff)
+#define BTF_INT_BITS(VAL)      ((VAL)  & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED (1 << 0)
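
The tightened mask keeps BTF_INT_BITS inside the single byte the kernel actually encodes; a quick illustration, assuming the btf.h macros above are in scope (the value is made up):

    #include <assert.h>

    int main(void)
    {
            unsigned int v = 0x01000040;  /* example: signed, offset 0, 64 bits */
            assert(BTF_INT_ENCODING(v) == BTF_INT_SIGNED);
            assert(BTF_INT_OFFSET(v) == 0);
            assert(BTF_INT_BITS(v) == 64);  /* a 0xffff mask could read stray bits */
            return 0;
    }
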
index 68699f654118592527096dc26336f57da6a01cdc..cf01b68242448512416c1b1aa25f0904915aad0a 100644 (file)
@@ -333,6 +333,7 @@ enum {
        IFLA_BRPORT_BCAST_FLOOD,
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
+       IFLA_BRPORT_ISOLATED,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
        IFLA_VXLAN_COLLECT_METADATA,
        IFLA_VXLAN_LABEL,
        IFLA_VXLAN_GPE,
+       IFLA_VXLAN_TTL_INHERIT,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
new file mode 100644 (file)
index 0000000..48e8a22
--- /dev/null
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             Definitions of the Internet Protocol.
+ *
+ * Version:    @(#)in.h        1.0.1   04/21/93
+ *
+ * Authors:    Original taken from the GNU Project <netinet/in.h> file.
+ *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_LINUX_IN_H
+#define _UAPI_LINUX_IN_H
+
+#include <linux/types.h>
+#include <linux/libc-compat.h>
+#include <linux/socket.h>
+
+#if __UAPI_DEF_IN_IPPROTO
+/* Standard well-defined IP protocols.  */
+enum {
+  IPPROTO_IP = 0,              /* Dummy protocol for TCP               */
+#define IPPROTO_IP             IPPROTO_IP
+  IPPROTO_ICMP = 1,            /* Internet Control Message Protocol    */
+#define IPPROTO_ICMP           IPPROTO_ICMP
+  IPPROTO_IGMP = 2,            /* Internet Group Management Protocol   */
+#define IPPROTO_IGMP           IPPROTO_IGMP
+  IPPROTO_IPIP = 4,            /* IPIP tunnels (older KA9Q tunnels use 94) */
+#define IPPROTO_IPIP           IPPROTO_IPIP
+  IPPROTO_TCP = 6,             /* Transmission Control Protocol        */
+#define IPPROTO_TCP            IPPROTO_TCP
+  IPPROTO_EGP = 8,             /* Exterior Gateway Protocol            */
+#define IPPROTO_EGP            IPPROTO_EGP
+  IPPROTO_PUP = 12,            /* PUP protocol                         */
+#define IPPROTO_PUP            IPPROTO_PUP
+  IPPROTO_UDP = 17,            /* User Datagram Protocol               */
+#define IPPROTO_UDP            IPPROTO_UDP
+  IPPROTO_IDP = 22,            /* XNS IDP protocol                     */
+#define IPPROTO_IDP            IPPROTO_IDP
+  IPPROTO_TP = 29,             /* SO Transport Protocol Class 4        */
+#define IPPROTO_TP             IPPROTO_TP
+  IPPROTO_DCCP = 33,           /* Datagram Congestion Control Protocol */
+#define IPPROTO_DCCP           IPPROTO_DCCP
+  IPPROTO_IPV6 = 41,           /* IPv6-in-IPv4 tunnelling              */
+#define IPPROTO_IPV6           IPPROTO_IPV6
+  IPPROTO_RSVP = 46,           /* RSVP Protocol                        */
+#define IPPROTO_RSVP           IPPROTO_RSVP
+  IPPROTO_GRE = 47,            /* Cisco GRE tunnels (rfc 1701,1702)    */
+#define IPPROTO_GRE            IPPROTO_GRE
+  IPPROTO_ESP = 50,            /* Encapsulation Security Payload protocol */
+#define IPPROTO_ESP            IPPROTO_ESP
+  IPPROTO_AH = 51,             /* Authentication Header protocol       */
+#define IPPROTO_AH             IPPROTO_AH
+  IPPROTO_MTP = 92,            /* Multicast Transport Protocol         */
+#define IPPROTO_MTP            IPPROTO_MTP
+  IPPROTO_BEETPH = 94,         /* IP option pseudo header for BEET     */
+#define IPPROTO_BEETPH         IPPROTO_BEETPH
+  IPPROTO_ENCAP = 98,          /* Encapsulation Header                 */
+#define IPPROTO_ENCAP          IPPROTO_ENCAP
+  IPPROTO_PIM = 103,           /* Protocol Independent Multicast       */
+#define IPPROTO_PIM            IPPROTO_PIM
+  IPPROTO_COMP = 108,          /* Compression Header Protocol          */
+#define IPPROTO_COMP           IPPROTO_COMP
+  IPPROTO_SCTP = 132,          /* Stream Control Transport Protocol    */
+#define IPPROTO_SCTP           IPPROTO_SCTP
+  IPPROTO_UDPLITE = 136,       /* UDP-Lite (RFC 3828)                  */
+#define IPPROTO_UDPLITE                IPPROTO_UDPLITE
+  IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
+#define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_RAW = 255,           /* Raw IP packets                       */
+#define IPPROTO_RAW            IPPROTO_RAW
+  IPPROTO_MAX
+};
+#endif
+
+#if __UAPI_DEF_IN_ADDR
+/* Internet address. */
+struct in_addr {
+       __be32  s_addr;
+};
+#endif
+
+#define IP_TOS         1
+#define IP_TTL         2
+#define IP_HDRINCL     3
+#define IP_OPTIONS     4
+#define IP_ROUTER_ALERT        5
+#define IP_RECVOPTS    6
+#define IP_RETOPTS     7
+#define IP_PKTINFO     8
+#define IP_PKTOPTIONS  9
+#define IP_MTU_DISCOVER        10
+#define IP_RECVERR     11
+#define IP_RECVTTL     12
+#define        IP_RECVTOS      13
+#define IP_MTU         14
+#define IP_FREEBIND    15
+#define IP_IPSEC_POLICY        16
+#define IP_XFRM_POLICY 17
+#define IP_PASSSEC     18
+#define IP_TRANSPARENT 19
+
+/* BSD compatibility */
+#define IP_RECVRETOPTS IP_RETOPTS
+
+/* TProxy original addresses */
+#define IP_ORIGDSTADDR       20
+#define IP_RECVORIGDSTADDR   IP_ORIGDSTADDR
+
+#define IP_MINTTL       21
+#define IP_NODEFRAG     22
+#define IP_CHECKSUM    23
+#define IP_BIND_ADDRESS_NO_PORT        24
+#define IP_RECVFRAGSIZE        25
+
+/* IP_MTU_DISCOVER values */
+#define IP_PMTUDISC_DONT               0       /* Never send DF frames */
+#define IP_PMTUDISC_WANT               1       /* Use per route hints  */
+#define IP_PMTUDISC_DO                 2       /* Always DF            */
+#define IP_PMTUDISC_PROBE              3       /* Ignore dst pmtu      */
+/* Always use interface mtu (ignores dst pmtu) but don't set DF flag.
+ * Also incoming ICMP frag_needed notifications will be ignored on
+ * this socket to prevent accepting spoofed ones.
+ */
+#define IP_PMTUDISC_INTERFACE          4
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
+ * fragmented if they exceed the interface mtu
+ */
+#define IP_PMTUDISC_OMIT               5
+
+#define IP_MULTICAST_IF                        32
+#define IP_MULTICAST_TTL               33
+#define IP_MULTICAST_LOOP              34
+#define IP_ADD_MEMBERSHIP              35
+#define IP_DROP_MEMBERSHIP             36
+#define IP_UNBLOCK_SOURCE              37
+#define IP_BLOCK_SOURCE                        38
+#define IP_ADD_SOURCE_MEMBERSHIP       39
+#define IP_DROP_SOURCE_MEMBERSHIP      40
+#define IP_MSFILTER                    41
+#define MCAST_JOIN_GROUP               42
+#define MCAST_BLOCK_SOURCE             43
+#define MCAST_UNBLOCK_SOURCE           44
+#define MCAST_LEAVE_GROUP              45
+#define MCAST_JOIN_SOURCE_GROUP                46
+#define MCAST_LEAVE_SOURCE_GROUP       47
+#define MCAST_MSFILTER                 48
+#define IP_MULTICAST_ALL               49
+#define IP_UNICAST_IF                  50
+
+#define MCAST_EXCLUDE  0
+#define MCAST_INCLUDE  1
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL        1
+#define IP_DEFAULT_MULTICAST_LOOP       1
+
+/* Request struct for multicast socket ops */
+
+#if __UAPI_DEF_IP_MREQ
+struct ip_mreq  {
+       struct in_addr imr_multiaddr;   /* IP multicast address of group */
+       struct in_addr imr_interface;   /* local IP address of interface */
+};
+
+struct ip_mreqn {
+       struct in_addr  imr_multiaddr;          /* IP multicast address of group */
+       struct in_addr  imr_address;            /* local IP address of interface */
+       int             imr_ifindex;            /* Interface index */
+};
+
+struct ip_mreq_source {
+       __be32          imr_multiaddr;
+       __be32          imr_interface;
+       __be32          imr_sourceaddr;
+};
+
+struct ip_msfilter {
+       __be32          imsf_multiaddr;
+       __be32          imsf_interface;
+       __u32           imsf_fmode;
+       __u32           imsf_numsrc;
+       __be32          imsf_slist[1];
+};
+
+#define IP_MSFILTER_SIZE(numsrc) \
+       (sizeof(struct ip_msfilter) - sizeof(__u32) \
+       + (numsrc) * sizeof(__u32))
+
+struct group_req {
+       __u32                            gr_interface;  /* interface index */
+       struct __kernel_sockaddr_storage gr_group;      /* group address */
+};
+
+struct group_source_req {
+       __u32                            gsr_interface; /* interface index */
+       struct __kernel_sockaddr_storage gsr_group;     /* group address */
+       struct __kernel_sockaddr_storage gsr_source;    /* source address */
+};
+
+struct group_filter {
+       __u32                            gf_interface;  /* interface index */
+       struct __kernel_sockaddr_storage gf_group;      /* multicast address */
+       __u32                            gf_fmode;      /* filter mode */
+       __u32                            gf_numsrc;     /* number of sources */
+       struct __kernel_sockaddr_storage gf_slist[1];   /* source address list */
+};
+
+#define GROUP_FILTER_SIZE(numsrc) \
+       (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
+       + (numsrc) * sizeof(struct __kernel_sockaddr_storage))
+#endif
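+
+As a usage sketch, struct ip_mreqn is the argument most commonly passed to
+IP_ADD_MEMBERSHIP; the group address and interface index below are purely
+illustrative (error handling elided):
+
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <netinet/in.h>
+    #include <arpa/inet.h>
+
+    int fd = socket(AF_INET, SOCK_DGRAM, 0);
+    struct ip_mreqn mreq;
+
+    memset(&mreq, 0, sizeof(mreq));
+    inet_pton(AF_INET, "224.0.0.251", &mreq.imr_multiaddr); /* example group */
+    mreq.imr_ifindex = 2;  /* hypothetical interface index */
+
+    setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));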
+
+#if __UAPI_DEF_IN_PKTINFO
+struct in_pktinfo {
+       int             ipi_ifindex;
+       struct in_addr  ipi_spec_dst;
+       struct in_addr  ipi_addr;
+};
+#endif
+
+/* Structure describing an Internet (IP) socket address. */
+#if  __UAPI_DEF_SOCKADDR_IN
+#define __SOCK_SIZE__  16              /* sizeof(struct sockaddr)      */
+struct sockaddr_in {
+  __kernel_sa_family_t sin_family;     /* Address family               */
+  __be16               sin_port;       /* Port number                  */
+  struct in_addr       sin_addr;       /* Internet address             */
+
+  /* Pad to size of `struct sockaddr'. */
+  unsigned char                __pad[__SOCK_SIZE__ - sizeof(short int) -
+                       sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero       __pad           /* for BSD UNIX comp. -FvK      */
+#endif
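+
+A short sketch of filling struct sockaddr_in for bind(2); the port and
+address are illustrative, and zeroing the struct also clears the
+__pad/sin_zero area:
+
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <netinet/in.h>
+    #include <arpa/inet.h>
+
+    int fd = socket(AF_INET, SOCK_STREAM, 0);
+    struct sockaddr_in sa;
+
+    memset(&sa, 0, sizeof(sa));
+    sa.sin_family = AF_INET;
+    sa.sin_port = htons(8080);  /* hypothetical port */
+    inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);
+
+    bind(fd, (struct sockaddr *)&sa, sizeof(sa));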
+
+#if __UAPI_DEF_IN_CLASS
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define        IN_CLASSA(a)            ((((long int) (a)) & 0x80000000) == 0)
+#define        IN_CLASSA_NET           0xff000000
+#define        IN_CLASSA_NSHIFT        24
+#define        IN_CLASSA_HOST          (0xffffffff & ~IN_CLASSA_NET)
+#define        IN_CLASSA_MAX           128
+
+#define        IN_CLASSB(a)            ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define        IN_CLASSB_NET           0xffff0000
+#define        IN_CLASSB_NSHIFT        16
+#define        IN_CLASSB_HOST          (0xffffffff & ~IN_CLASSB_NET)
+#define        IN_CLASSB_MAX           65536
+
+#define        IN_CLASSC(a)            ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define        IN_CLASSC_NET           0xffffff00
+#define        IN_CLASSC_NSHIFT        8
+#define        IN_CLASSC_HOST          (0xffffffff & ~IN_CLASSC_NET)
+
+#define        IN_CLASSD(a)            ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define        IN_MULTICAST(a)         IN_CLASSD(a)
+#define IN_MULTICAST_NET       0xF0000000
+
+#define        IN_EXPERIMENTAL(a)      ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define        IN_BADCLASS(a)          IN_EXPERIMENTAL((a))
+
+/* Address to accept any incoming messages. */
+#define        INADDR_ANY              ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define        INADDR_BROADCAST        ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define        INADDR_NONE             ((unsigned long int) 0xffffffff)
+
+/* Network number for local host loopback. */
+#define        IN_LOOPBACKNET          127
+
+/* Address to loopback in software to local host.  */
+#define        INADDR_LOOPBACK         0x7f000001      /* 127.0.0.1   */
+#define        IN_LOOPBACK(a)          ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP    0xe0000000U     /* 224.0.0.0   */
+#define INADDR_ALLHOSTS_GROUP  0xe0000001U     /* 224.0.0.1   */
+#define INADDR_ALLRTRS_GROUP    0xe0000002U    /* 224.0.0.2 */
+#define INADDR_MAX_LOCAL_GROUP  0xe00000ffU    /* 224.0.0.255 */
+#endif
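+
+Note that these macros expect the address in host byte order; a small sketch
+of classifying an address (values illustrative):
+
+    #include <arpa/inet.h>
+    #include <netinet/in.h>
+    #include <stdio.h>
+
+    /* inet_addr() returns network byte order; convert before testing. */
+    in_addr_t a = ntohl(inet_addr("224.0.0.1"));
+
+    if (IN_MULTICAST(a))
+            printf("class D / multicast\n");
+    if (IN_LOOPBACK(ntohl(inet_addr("127.0.0.1"))))
+            printf("loopback\n");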
+
+/* <asm/byteorder.h> contains the htonl family of byte-order helpers. */
+#include <asm/byteorder.h>
+
+
+#endif /* _UAPI_LINUX_IN_H */
index 39e364c70caf780312808e179a1bb234aa45460e..b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96 100644 (file)
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index b8e288a1f7409012d50e464e7993b96d4c404610..eeb787b1c53c72771c8d684154b7a87dc029a45b 100644 (file)
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
+
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
 };
 
 /*
index 8c54a4b6f187539cfe21d7a93e5748cab8b4ad59..c36a3a76986a5ee17ad51a876af10ddb06492fba 100644 (file)
@@ -1,8 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
-#include <stdint.h>
 #include <string.h>
 #include <unistd.h>
 #include <errno.h>
@@ -27,13 +26,13 @@ struct btf {
        struct btf_type **types;
        const char *strings;
        void *nohdr_data;
-       uint32_t nr_types;
-       uint32_t types_size;
-       uint32_t data_size;
+       __u32 nr_types;
+       __u32 types_size;
+       __u32 data_size;
        int fd;
 };
 
-static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset)
+static const char *btf_name_by_offset(const struct btf *btf, __u32 offset)
 {
        if (offset < btf->hdr->str_len)
                return &btf->strings[offset];
@@ -45,7 +44,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
 {
        if (btf->types_size - btf->nr_types < 2) {
                struct btf_type **new_types;
-               u32 expand_by, new_size;
+               __u32 expand_by, new_size;
 
                if (btf->types_size == BTF_MAX_NR_TYPES)
                        return -E2BIG;
@@ -72,7 +71,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
 static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
 {
        const struct btf_header *hdr = btf->hdr;
-       u32 meta_left;
+       __u32 meta_left;
 
        if (btf->data_size < sizeof(struct btf_header)) {
                elog("BTF header not found\n");
@@ -151,7 +150,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
 
        while (next_type < end_type) {
                struct btf_type *t = next_type;
-               uint16_t vlen = BTF_INFO_VLEN(t->info);
+               __u16 vlen = BTF_INFO_VLEN(t->info);
                int err;
 
                next_type += sizeof(*t);
@@ -190,8 +189,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
        return 0;
 }
 
-static const struct btf_type *btf_type_by_id(const struct btf *btf,
-                                            uint32_t type_id)
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
 {
        if (type_id > btf->nr_types)
                return NULL;
@@ -209,7 +207,7 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
        return !t || btf_type_is_void(t);
 }
 
-static int64_t btf_type_size(const struct btf_type *t)
+static __s64 btf_type_size(const struct btf_type *t)
 {
        switch (BTF_INFO_KIND(t->info)) {
        case BTF_KIND_INT:
@@ -226,15 +224,15 @@ static int64_t btf_type_size(const struct btf_type *t)
 
 #define MAX_RESOLVE_DEPTH 32
 
-int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
+__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
 {
        const struct btf_array *array;
        const struct btf_type *t;
-       uint32_t nelems = 1;
-       int64_t size = -1;
+       __u32 nelems = 1;
+       __s64 size = -1;
        int i;
 
-       t = btf_type_by_id(btf, type_id);
+       t = btf__type_by_id(btf, type_id);
        for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
             i++) {
                size = btf_type_size(t);
@@ -259,7 +257,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
                        return -EINVAL;
                }
 
-               t = btf_type_by_id(btf, type_id);
+               t = btf__type_by_id(btf, type_id);
        }
 
        if (size < 0)
@@ -271,9 +269,9 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
        return nelems * size;
 }
 
-int32_t btf__find_by_name(const struct btf *btf, const char *type_name)
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 {
-       uint32_t i;
+       __u32 i;
 
        if (!strcmp(type_name, "void"))
                return 0;
@@ -302,10 +300,9 @@ void btf__free(struct btf *btf)
        free(btf);
 }
 
-struct btf *btf__new(uint8_t *data, uint32_t size,
-                    btf_print_fn_t err_log)
+struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
 {
-       uint32_t log_buf_size = 0;
+       __u32 log_buf_size = 0;
        char *log_buf = NULL;
        struct btf *btf;
        int err;
index 74bb344035bb9cb3d5a4391f7d80a21ee05fb064..caac3a404dc54221b46b2c4feb373801c311bddf 100644 (file)
@@ -1,22 +1,24 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
 #define __BPF_BTF_H
 
-#include <stdint.h>
+#include <linux/types.h>
 
 #define BTF_ELF_SEC ".BTF"
 
 struct btf;
+struct btf_type;
 
 typedef int (*btf_print_fn_t)(const char *, ...)
        __attribute__((format(printf, 1, 2)));
 
 void btf__free(struct btf *btf);
-struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log);
-int32_t btf__find_by_name(const struct btf *btf, const char *type_name);
-int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id);
+struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
+__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 int btf__fd(const struct btf *btf);
 
 #endif
index a1e96b5de5ff88c13926fdb28a70150f821ec694..1aafdbe827fedce6434a372041d2df43385e549a 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/list.h>
 #include <linux/limits.h>
 #include <sys/stat.h>
@@ -216,8 +217,8 @@ struct bpf_map {
        size_t offset;
        int map_ifindex;
        struct bpf_map_def def;
-       uint32_t btf_key_type_id;
-       uint32_t btf_value_type_id;
+       __u32 btf_key_type_id;
+       __u32 btf_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
 };
@@ -1014,68 +1015,72 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 
 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
 {
+       const struct btf_type *container_type;
+       const struct btf_member *key, *value;
        struct bpf_map_def *def = &map->def;
        const size_t max_name = 256;
-       int64_t key_size, value_size;
-       int32_t key_id, value_id;
-       char name[max_name];
+       char container_name[max_name];
+       __s64 key_size, value_size;
+       __s32 container_id;
 
-       /* Find key type by name from BTF */
-       if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
-               pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
+       if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
+           max_name) {
+               pr_warning("map:%s length of '____btf_map_%s' is too long\n",
                           map->name, map->name);
                return -EINVAL;
        }
 
-       key_id = btf__find_by_name(btf, name);
-       if (key_id < 0) {
-               pr_debug("map:%s key_type:%s cannot be found in BTF\n",
-                        map->name, name);
-               return key_id;
+       container_id = btf__find_by_name(btf, container_name);
+       if (container_id < 0) {
+               pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
+                        map->name, container_name);
+               return container_id;
        }
 
-       key_size = btf__resolve_size(btf, key_id);
-       if (key_size < 0) {
-               pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
-                          map->name, name);
-               return key_size;
+       container_type = btf__type_by_id(btf, container_id);
+       if (!container_type) {
+               pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+                          map->name, container_id);
+               return -EINVAL;
        }
 
-       if (def->key_size != key_size) {
-               pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
-                          map->name, name, (unsigned int)key_size, def->key_size);
+       if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
+           BTF_INFO_VLEN(container_type->info) < 2) {
+               pr_warning("map:%s container_name:%s is an invalid container struct\n",
+                          map->name, container_name);
                return -EINVAL;
        }
 
-       /* Find value type from BTF */
-       if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
-               pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
-                         map->name, map->name);
-               return -EINVAL;
+       key = (struct btf_member *)(container_type + 1);
+       value = key + 1;
+
+       key_size = btf__resolve_size(btf, key->type);
+       if (key_size < 0) {
+               pr_warning("map:%s invalid BTF key_type_size\n",
+                          map->name);
+               return key_size;
        }
 
-       value_id = btf__find_by_name(btf, name);
-       if (value_id < 0) {
-               pr_debug("map:%s value_type:%s cannot be found in BTF\n",
-                        map->name, name);
-               return value_id;
+       if (def->key_size != key_size) {
+               pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+                          map->name, (__u32)key_size, def->key_size);
+               return -EINVAL;
        }
 
-       value_size = btf__resolve_size(btf, value_id);
+       value_size = btf__resolve_size(btf, value->type);
        if (value_size < 0) {
-               pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
-                          map->name, name);
+               pr_warning("map:%s invalid BTF value_type_size\n", map->name);
                return value_size;
        }
 
        if (def->value_size != value_size) {
-               pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
-                          map->name, name, (unsigned int)value_size, def->value_size);
+               pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+                          map->name, (__u32)value_size, def->value_size);
                return -EINVAL;
        }
 
-       map->btf_key_type_id = key_id;
-       map->btf_value_type_id = value_id;
+       map->btf_key_type_id = key->type;
+       map->btf_value_type_id = value->type;
 
        return 0;
 }
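
For readers tracing where the ____btf_map_<name> container comes from: it is
emitted by the BPF_ANNOTATE_KV_PAIR helper in the BPF selftests headers of
this era, which wraps a map's key and value types in one struct so libbpf can
recover both from a single BTF lookup. A sketch along those lines (the exact
macro text here is an assumption, not quoted from this patch):

    #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)      \
            struct ____btf_map_##name {                         \
                    type_key key;                               \
                    type_val value;                             \
            };                                                  \
            struct ____btf_map_##name                           \
            __attribute__((section(".maps." #name), used))      \
                    ____btf_map_##name = { }

    /* Usage: emits BTF describing a map named "my_map" keyed by int. */
    BPF_ANNOTATE_KV_PAIR(my_map, int, long);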
@@ -2089,12 +2094,12 @@ const char *bpf_map__name(struct bpf_map *map)
        return map ? map->name : NULL;
 }
 
-uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
+__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
 {
        return map ? map->btf_key_type_id : 0;
 }
 
-uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
+__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
 {
        return map ? map->btf_value_type_id : 0;
 }
@@ -2268,8 +2273,8 @@ bpf_perf_event_read_simple(void *mem, unsigned long size,
        volatile struct perf_event_mmap_page *header = mem;
        __u64 data_tail = header->data_tail;
        __u64 data_head = header->data_head;
+       int ret = LIBBPF_PERF_EVENT_ERROR;
        void *base, *begin, *end;
-       int ret;
 
        asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
        if (data_head == data_tail)
index 09976531aa74dc7583a8ad9db422ea9774a2c87f..b33ae02f7d0e4f6c0e301d9f96f33e86a4585b5c 100644 (file)
@@ -244,8 +244,8 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
 int bpf_map__fd(struct bpf_map *map);
 const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
 const char *bpf_map__name(struct bpf_map *map);
-uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map);
-uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map);
+__u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
+__u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 int bpf_map__set_priv(struct bpf_map *map, void *priv,
index 1b09f3175a1fedc910bb73b3e35780556e448874..0cbd1ef8f86d2de87462ab1fcc041291b3d342bc 100644 (file)
@@ -804,7 +804,7 @@ type of fence:
 Second, some types of fence affect the way the memory subsystem
 propagates stores.  When a fence instruction is executed on CPU C:
 
-       For each other CPU C', smb_wmb() forces all po-earlier stores
+       For each other CPU C', smp_wmb() forces all po-earlier stores
        on C to propagate to C' before any po-later stores do.
 
        For each other CPU C', any store which propagates to C before
index ee4309a87fc45b625581a9478e69c547b6e9e1ef..af72700cc20a20ca8d8d2d524c1d777dd9c1e988 100644 (file)
@@ -126,7 +126,7 @@ However, it is not necessarily the case that accesses ordered by
 locking will be seen as ordered by CPUs not holding that lock.
 Consider this example:
 
-       /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
+       /* See Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus. */
        void CPU0(void)
        {
                spin_lock(&mylock);
@@ -292,7 +292,7 @@ and to use smp_load_acquire() instead of smp_rmb().  However, the older
 smp_wmb() and smp_rmb() APIs are still heavily used, so it is important
 to understand their use cases.  The general approach is shown below:
 
-       /* See MP+wmbonceonce+rmbonceonce.litmus. */
+       /* See MP+fencewmbonceonce+fencermbonceonce.litmus. */
        void CPU0(void)
        {
                WRITE_ONCE(x, 1);
@@ -322,9 +322,9 @@ the following write-side code fragment:
 And the xlog_valid_lsn() function in fs/xfs/xfs_log_priv.h contains
 the corresponding read-side code fragment:
 
-       cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+       cur_cycle = READ_ONCE(log->l_curr_cycle);
        smp_rmb();
-       cur_block = ACCESS_ONCE(log->l_curr_block);
+       cur_block = READ_ONCE(log->l_curr_block);
 
 Alternatively, consider the following comment in function
 perf_output_put_handle() in kernel/events/ring_buffer.c:
@@ -360,7 +360,7 @@ can be seen in the LB+poonceonces.litmus litmus test.
 One way of avoiding the counter-intuitive outcome is through the use of a
 control dependency paired with a full memory barrier:
 
-       /* See LB+ctrlonceonce+mbonceonce.litmus. */
+       /* See LB+fencembonceonce+ctrlonceonce.litmus. */
        void CPU0(void)
        {
                r0 = READ_ONCE(x);
@@ -476,7 +476,7 @@ that one CPU first stores to one variable and then loads from a second,
 while another CPU stores to the second variable and then loads from the
 first.  Preserving order requires nothing less than full barriers:
 
-       /* See SB+mbonceonces.litmus. */
+       /* See SB+fencembonceonces.litmus. */
        void CPU0(void)
        {
                WRITE_ONCE(x, 1);
index 734f7feaa5dc59f294bc351416aa370024d4818c..ee987ce20aaec7376fe9417354b27e27356f3c43 100644 (file)
@@ -35,13 +35,13 @@ BASIC USAGE: HERD7
 The memory model is used, in conjunction with "herd7", to exhaustively
 explore the state space of small litmus tests.
 
-For example, to run SB+mbonceonces.litmus against the memory model:
+For example, to run SB+fencembonceonces.litmus against the memory model:
 
-  $ herd7 -conf linux-kernel.cfg litmus-tests/SB+mbonceonces.litmus
+  $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
 
 Here is the corresponding output:
 
-  Test SB+mbonceonces Allowed
+  Test SB+fencembonceonces Allowed
   States 3
   0:r0=0; 1:r0=1;
   0:r0=1; 1:r0=0;
@@ -50,8 +50,8 @@ Here is the corresponding output:
   Witnesses
   Positive: 0 Negative: 3
   Condition exists (0:r0=0 /\ 1:r0=0)
-  Observation SB+mbonceonces Never 0 3
-  Time SB+mbonceonces 0.01
+  Observation SB+fencembonceonces Never 0 3
+  Time SB+fencembonceonces 0.01
   Hash=d66d99523e2cac6b06e66f4c995ebb48
 
 The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that
@@ -67,16 +67,16 @@ BASIC USAGE: KLITMUS7
 The "klitmus7" tool converts a litmus test into a Linux kernel module,
 which may then be loaded and run.
 
-For example, to run SB+mbonceonces.litmus against hardware:
+For example, to run SB+fencembonceonces.litmus against hardware:
 
   $ mkdir mymodules
-  $ klitmus7 -o mymodules litmus-tests/SB+mbonceonces.litmus
+  $ klitmus7 -o mymodules litmus-tests/SB+fencembonceonces.litmus
   $ cd mymodules ; make
   $ sudo sh run.sh
 
 The corresponding output includes:
 
-  Test SB+mbonceonces Allowed
+  Test SB+fencembonceonces Allowed
   Histogram (3 states)
   644580  :>0:r0=1; 1:r0=0;
   644328  :>0:r0=0; 1:r0=1;
@@ -86,8 +86,8 @@ The corresponding output includes:
   Positive: 0, Negative: 2000000
   Condition exists (0:r0=0 /\ 1:r0=0) is NOT validated
   Hash=d66d99523e2cac6b06e66f4c995ebb48
-  Observation SB+mbonceonces Never 0 2000000
-  Time SB+mbonceonces 0.16
+  Observation SB+fencembonceonces Never 0 2000000
+  Time SB+fencembonceonces 0.16
 
 The "Positive: 0 Negative: 2000000" and the "Never 0 2000000" indicate
 that during two million trials, the state specified in this litmus
index 64f5740e0e7515ea611586422d262c3c1da2aad3..b84fb2f67109e03c2e32558599dbf48357d6863e 100644 (file)
@@ -13,7 +13,7 @@
 
 "Linux-kernel memory consistency model"
 
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE,ACCESS_ONCE*) ||
+enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
                'release (*smp_store_release*) ||
                'acquire (*smp_load_acquire*) ||
                'noreturn (* R of non-return RMW *)
similarity index 95%
rename from tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus
rename to tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
index 98a3716efa37ecc9ba4fd59e8560519ef33fab0a..e729d2776e89a7854bc8d2dfc8a856213e4b6a35 100644 (file)
@@ -1,4 +1,4 @@
-C IRIW+mbonceonces+OnceOnce
+C IRIW+fencembonceonces+OnceOnce
 
 (*
  * Result: Never
index 7a39a0aaa976cbf9dc2a5d05be7924dc46fc4d9c..0f749e419b34351749c44eddf35c62f34d024011 100644 (file)
@@ -1,4 +1,4 @@
-C ISA2+pooncelock+pooncelock+pombonce.litmus
+C ISA2+pooncelock+pooncelock+pombonce
 
 (*
  * Result: Sometimes
similarity index 95%
rename from tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus
rename to tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
index de6708229dd11b075a95c3aaa938675b340c3005..4727f5aaf03b05b1c98a6d2d53ded40c8d7023e4 100644 (file)
@@ -1,4 +1,4 @@
-C LB+ctrlonceonce+mbonceonce
+C LB+fencembonceonce+ctrlonceonce
 
 (*
  * Result: Never
similarity index 95%
rename from tools/memory-model/litmus-tests/R+mbonceonces.litmus
rename to tools/memory-model/litmus-tests/R+fencembonceonces.litmus
index a0e884ad213210bd0bf149d3a9a9c08805e4cfa6..222a0b850b4a5d98d087c2d94ab5158e928c43ae 100644 (file)
@@ -1,4 +1,4 @@
-C R+mbonceonces
+C R+fencembonceonces
 
 (*
  * Result: Never
index 17eb9a8c222dae122fa22b925b7af521190728f4..4581ec2d3c575fcd2439b01a1144ba2c868ccae0 100644 (file)
@@ -18,7 +18,7 @@ CoWW+poonceonce.litmus
        Test of write-write coherence, that is, whether or not two
        successive writes to the same variable are ordered.
 
-IRIW+mbonceonces+OnceOnce.litmus
+IRIW+fencembonceonces+OnceOnce.litmus
        Test of independent reads from independent writes with smp_mb()
	between each pair of reads.  In other words, is smp_mb()
        sufficient to cause two different reading processes to agree on
@@ -47,7 +47,7 @@ ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
        Can a release-acquire chain order a prior store against
        a later load?
 
-LB+ctrlonceonce+mbonceonce.litmus
+LB+fencembonceonce+ctrlonceonce.litmus
        Does a control dependency and an smp_mb() suffice for the
        load-buffering litmus test, where each process reads from one
        of two variables then writes to the other?
@@ -88,14 +88,14 @@ MP+porevlocks.litmus
        As below, but with the first access of the writer process
        and the second access of reader process protected by a lock.
 
-MP+wmbonceonce+rmbonceonce.litmus
+MP+fencewmbonceonce+fencermbonceonce.litmus
        Does a smp_wmb() (between the stores) and an smp_rmb() (between
        the loads) suffice for the message-passing litmus test, where one
        process writes data and then a flag, and the other process reads
        the flag and then the data.  (This is similar to the ISA2 tests,
        but with two processes instead of three.)
 
-R+mbonceonces.litmus
+R+fencembonceonces.litmus
        This is the fully ordered (via smp_mb()) version of one of
        the classic counterintuitive litmus tests that illustrates the
        effects of store propagation delays.
@@ -103,7 +103,7 @@ R+mbonceonces.litmus
 R+poonceonces.litmus
        As above, but without the smp_mb() invocations.
 
-SB+mbonceonces.litmus
+SB+fencembonceonces.litmus
	This is the fully ordered (again, via smp_mb()) version of store
        buffering, which forms the core of Dekker's mutual-exclusion
        algorithm.
@@ -111,15 +111,24 @@ SB+mbonceonces.litmus
 SB+poonceonces.litmus
        As above, but without the smp_mb() invocations.
 
+SB+rfionceonce-poonceonces.litmus
+       This litmus test demonstrates that LKMM is not fully multicopy
+       atomic.  (Neither is it other-multicopy atomic.)  This litmus test
+       also demonstrates the "locations" debugging aid, which designates
+       additional registers and locations to be printed out in the dump
+       of final states in the herd7 output.  Without the "locations"
+       statement, only those registers and locations mentioned in the
+       "exists" clause will be printed.
+
 S+poonceonces.litmus
        As below, but without the smp_wmb() and acquire load.
 
-S+wmbonceonce+poacquireonce.litmus
+S+fencewmbonceonce+poacquireonce.litmus
        Can a smp_wmb(), instead of a release, and an acquire order
        a prior store against a subsequent store?
 
 WRC+poonceonces+Once.litmus
-WRC+pooncerelease+rmbonceonce+Once.litmus
+WRC+pooncerelease+fencermbonceonce+Once.litmus
        These two are members of an extension of the MP litmus-test
        class in which the first write is moved to a separate process.
        The second is forbidden because smp_store_release() is
@@ -134,7 +143,7 @@ Z6.0+pooncelock+poonceLock+pombonce.litmus
        As above, but with smp_mb__after_spinlock() immediately
        following the spin_lock().
 
-Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus
+Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
        Is the ordering provided by a release-acquire chain sufficient
        to make ordering apparent to accesses by a process that does
        not participate in that release-acquire chain?
similarity index 90%
rename from tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus
rename to tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
index c53350205d282a35afea68db4b94dad73bc6fdef..18479823cd6ccd1315b6723a215bf8df7f38daf2 100644 (file)
@@ -1,4 +1,4 @@
-C S+wmbonceonce+poacquireonce
+C S+fencewmbonceonce+poacquireonce
 
 (*
  * Result: Never
similarity index 95%
rename from tools/memory-model/litmus-tests/SB+mbonceonces.litmus
rename to tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
index 74b874ffa8dadce5c9bb66015555dff7749d4b9e..ed5fff18d2232b722ab4aab2d8bc8403a6fb908b 100644 (file)
@@ -1,4 +1,4 @@
-C SB+mbonceonces
+C SB+fencembonceonces
 
 (*
  * Result: Never
diff --git a/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
new file mode 100644 (file)
index 0000000..04a1660
--- /dev/null
@@ -0,0 +1,32 @@
+C SB+rfionceonce-poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that LKMM is not fully multicopy atomic.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+       int r1;
+       int r2;
+
+       WRITE_ONCE(*x, 1);
+       r1 = READ_ONCE(*x);
+       r2 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+       int r3;
+       int r4;
+
+       WRITE_ONCE(*y, 1);
+       r3 = READ_ONCE(*y);
+       r4 = READ_ONCE(*x);
+}
+
+locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *)
+exists (0:r2=0 /\ 1:r4=0)
old mode 100644 (file)
new mode 100755 (executable)
index af0aa15..ca528f9
@@ -9,7 +9,7 @@
 # appended.
 #
 # Usage:
-#      sh checkalllitmus.sh [ directory ]
+#      checkalllitmus.sh [ directory ]
 #
 # The LINUX_HERD_OPTIONS environment variable may be used to specify
 # arguments to herd, whose default is defined by the checklitmus.sh script.
old mode 100644 (file)
new mode 100755 (executable)
index e2e4774..bf12a75
@@ -8,7 +8,7 @@
 # with ".out" appended.
 #
 # Usage:
-#      sh checklitmus.sh file.litmus
+#      checklitmus.sh file.litmus
 #
 # The LINUX_HERD_OPTIONS environment variable may be used to specify
 # arguments to herd, which default to "-conf linux-kernel.cfg".  Thus,
index 9c9dc579bd7db172287f5fdea9628cabaeff56af..46f516dd80ce9fbe0f01406b406ea9d582f6bbcc 100644 (file)
@@ -88,6 +88,7 @@ struct orc_entry {
        unsigned        sp_reg:4;
        unsigned        bp_reg:4;
        unsigned        type:2;
+       unsigned        end:1;
 } __packed;
 
 /*
@@ -101,6 +102,7 @@ struct unwind_hint {
        s16             sp_offset;
        u8              sp_reg;
        u8              type;
+       u8              end;
 };
 #endif /* __ASSEMBLY__ */
 
index 38047c6aa57576d170b3281eb0de0376894e9f41..2928939b98ec208fee9069cfc3ddf4aea91f01f3 100644 (file)
@@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "lbug_with_loc",
                "fortify_panic",
                "usercopy_abort",
+               "machine_real_restart",
        };
 
        if (func->bind == STB_WEAK)
@@ -1156,6 +1157,7 @@ static int read_unwind_hints(struct objtool_file *file)
 
                cfa->offset = hint->sp_offset;
                insn->state.type = hint->type;
+               insn->state.end = hint->end;
        }
 
        return 0;
index c6b68fcb926ff76c6e44675ff636c16f3552a184..95700a2bcb7c1ee429c6b2e0270428a5bfb8b54a 100644 (file)
@@ -31,7 +31,7 @@ struct insn_state {
        int stack_size;
        unsigned char type;
        bool bp_scratch;
-       bool drap;
+       bool drap, end;
        int drap_reg, drap_offset;
        struct cfi_reg vals[CFI_NUM_REGS];
 };
index 4e60e105583ee803916589ca56df0e81e12b8fb3..7ec85d567598c5047fbe00b9660c9e7fc76870cf 100644 (file)
@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf)
                                continue;
                        sym->pfunc = sym->cfunc = sym;
                        coldstr = strstr(sym->name, ".cold.");
-                       if (coldstr) {
-                               coldstr[0] = '\0';
-                               pfunc = find_symbol_by_name(elf, sym->name);
-                               coldstr[0] = '.';
-
-                               if (!pfunc) {
-                                       WARN("%s(): can't find parent function",
-                                            sym->name);
-                                       goto err;
-                               }
-
-                               sym->pfunc = pfunc;
-                               pfunc->cfunc = sym;
+                       if (!coldstr)
+                               continue;
+
+                       coldstr[0] = '\0';
+                       pfunc = find_symbol_by_name(elf, sym->name);
+                       coldstr[0] = '.';
+
+                       if (!pfunc) {
+                               WARN("%s(): can't find parent function",
+                                    sym->name);
+                               goto err;
+                       }
+
+                       sym->pfunc = pfunc;
+                       pfunc->cfunc = sym;
+
+                       /*
+                        * Unfortunately, -fno-reorder-functions puts the child
+                        * inside the parent.  Remove the overlap so we can
+                        * have sane assumptions.
+                        *
+                        * Note that pfunc->len no longer matches
+                        * pfunc->sym.st_size.
+                        */
+                       if (sym->sec == pfunc->sec &&
+                           sym->offset >= pfunc->offset &&
+                           sym->offset + sym->len == pfunc->offset + pfunc->len) {
+                               pfunc->len -= sym->len;
                        }
                }
        }
@@ -504,10 +519,12 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        sec->sh.sh_flags = SHF_ALLOC;
 
 
-       /* Add section name to .shstrtab */
+       /* Add section name to .shstrtab (or .strtab for Clang) */
        shstrtab = find_section_by_name(elf, ".shstrtab");
+       if (!shstrtab)
+               shstrtab = find_section_by_name(elf, ".strtab");
        if (!shstrtab) {
-               WARN("can't find .shstrtab section");
+               WARN("can't find .shstrtab or .strtab section");
                return NULL;
        }
 
index c3343820916a6dccf0e49bf50cbfbc811b4525fd..faa444270ee3ab297933504b703e4628321087ae 100644 (file)
@@ -203,7 +203,8 @@ int orc_dump(const char *_objname)
 
                print_reg(orc[i].bp_reg, orc[i].bp_offset);
 
-               printf(" type:%s\n", orc_type_name(orc[i].type));
+               printf(" type:%s end:%d\n",
+                      orc_type_name(orc[i].type), orc[i].end);
        }
 
        elf_end(elf);
index 18384d9be4e170f5d15b5c8981d0a576e1fac87a..3f98dcfbc177b12147f491d9fd577916c02b2076 100644 (file)
@@ -31,6 +31,8 @@ int create_orc(struct objtool_file *file)
                struct cfi_reg *cfa = &insn->state.cfa;
                struct cfi_reg *bp = &insn->state.regs[CFI_BP];
 
+               orc->end = insn->state.end;
+
                if (cfa->base == CFI_UNDEFINED) {
                        orc->sp_reg = ORC_REG_UNDEFINED;
                        continue;
index 11300dbe35c5dc2ffd37f1839dabc0733c755f2e..236b9b97dfdb1d5d52d6cc9dcb9198cfd2ec1739 100644 (file)
@@ -18,6 +18,10 @@ various perf commands with the -e option.
 
 OPTIONS
 -------
+-d::
+--desc::
+Print extra event descriptions. (default)
+
 --no-desc::
 Don't print descriptions.
 
@@ -25,11 +29,13 @@ Don't print descriptions.
 --long-desc::
 Print longer event descriptions.
 
+--debug::
+Enable debugging output.
+
 --details::
 Print how named events are resolved internally into perf events, and also
 any extra expressions computed by perf stat.
 
-
 [[EVENT_MODIFIERS]]
 EVENT MODIFIERS
 ---------------
@@ -234,7 +240,7 @@ perf also supports group leader sampling using the :S specifier.
   perf record -e '{cycles,instructions}:S' ...
   perf report --group
 
-Normally all events in a event group sample, but with :S only
+Normally all events in an event group sample, but with :S only
 the first event (the leader) samples, and it only reads the values of the
 other events in the group.
 
index 04168da4268e2cd436bbd4902a24c22d7777ee47..246dee081efda287e39060a55e4bfc8ca7bfde8e 100644 (file)
@@ -94,7 +94,7 @@ OPTIONS
          "perf report" to view group events together.
 
 --filter=<filter>::
-        Event filter. This option should follow a event selector (-e) which
+        Event filter. This option should follow an event selector (-e) which
        selects either tracepoint event(s) or a hardware trace PMU
        (e.g. Intel PT or CoreSight).
 
@@ -153,7 +153,7 @@ OPTIONS
 
 --exclude-perf::
        Don't record events issued by perf itself. This option should follow
-       a event selector (-e) which selects tracepoint event(s). It adds a
+       an event selector (-e) which selects tracepoint event(s). It adds a
        filter expression 'common_pid != $PERFPID' to filters. If other
        '--filter' exists, the new filter expression will be combined with
        them by '&&'.
index 5dfe102fb5b533979a2fb726b621cedd398d0935..b10a90b6a7181f8968420a875a2b2fc2b3919321 100644 (file)
@@ -178,6 +178,9 @@ Print count deltas for fixed number of times.
 This option should be used together with "-I" option.
        example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
 
+--interval-clear::
+Clear the screen before the next interval.
+
 --timeout msecs::
 Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
 This option is not supported with the "-I" option.
index b5ac356ba323c8a363b96e10082205078f12a3f8..f6d1a03c7523edc36b56d59a54d827e744c6dc9f 100644 (file)
@@ -54,6 +54,8 @@ endif
 
 ifeq ($(SRCARCH),arm64)
   NO_PERF_REGS := 0
+  NO_SYSCALL_TABLE := 0
+  CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
   LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
 endif
 
@@ -207,8 +209,7 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
   PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
-  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
+  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 endif
 
@@ -906,8 +907,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
-perf_include_dir = lib/include/perf
-perf_examples_dir = lib/examples/perf
+perf_include_dir = lib/perf/include
+perf_examples_dir = lib/perf/examples
 sharedir = $(prefix)/share
 template_dir = share/perf-core/templates
 STRACE_GROUPS_DIR = share/perf-core/strace/groups
index ecc9fc9526550d114d353a71a2325533172f6f42..b3d1b12a5081ba10a92d19c79b38c6fb4654a597 100644 (file)
@@ -384,6 +384,8 @@ export INSTALL SHELL_PATH
 
 SHELL = $(SHELL_PATH)
 
+linux_uapi_dir := $(srctree)/tools/include/uapi/linux
+
 beauty_outdir := $(OUTPUT)trace/beauty/generated
 beauty_ioctl_outdir := $(beauty_outdir)/ioctl
 drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c
@@ -431,6 +433,12 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
 $(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl)
        $(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@
 
+socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c
+socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
+
+$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
+       $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
+
 vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
 vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
 vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -566,6 +574,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
        $(sndrv_ctl_ioctl_array) \
        $(kcmp_type_array) \
        $(kvm_ioctl_array) \
+       $(socket_ipproto_array) \
        $(vhost_virtio_ioctl_array) \
        $(madvise_behavior_array) \
        $(perf_ioctl_array) \
@@ -860,6 +869,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
                $(OUTPUT)$(sndrv_pcm_ioctl_array) \
                $(OUTPUT)$(kvm_ioctl_array) \
                $(OUTPUT)$(kcmp_type_array) \
+               $(OUTPUT)$(socket_ipproto_array) \
                $(OUTPUT)$(vhost_virtio_ioctl_array) \
                $(OUTPUT)$(perf_ioctl_array) \
                $(OUTPUT)$(prctl_option_array) \
index 91de4860faadfee93e26c066a1ee8861957c1185..f013b115dc860d001120233df6d912ad35c842ec 100644 (file)
@@ -4,3 +4,24 @@ PERF_HAVE_DWARF_REGS := 1
 endif
 PERF_HAVE_JITDUMP := 1
 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out    := $(OUTPUT)arch/arm64/include/generated/asm
+header := $(out)/syscalls.c
+sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+
+clean::
+       $(call QUIET_CLEAN, arm64) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
new file mode 100755 (executable)
index 0000000..52e1973
--- /dev/null
@@ -0,0 +1,62 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# powerpc script.
+#
+# Copyright IBM Corp. 2017
+# Author(s):  Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
+# Changed by: Kim Phillips <kim.phillips@arm.com>
+
+gcc=$1
+hostcc=$2
+input=$3
+
+if ! test -r $input; then
+       echo "Could not read input file" >&2
+       exit 1
+fi
+
+create_table_from_c()
+{
+       local sc nr last_sc
+
+       create_table_exe=`mktemp /tmp/create-table-XXXXXX`
+
+       {
+
+       cat <<-_EoHEADER
+               #include <stdio.h>
+               #define __ARCH_WANT_RENAMEAT
+               #include "$input"
+               int main(int argc, char *argv[])
+               {
+       _EoHEADER
+
+       while read sc nr; do
+               printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", __NR_$sc);"
+               last_sc=$sc
+       done
+
+       printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
+       printf "}\n"
+
+       } | $hostcc -o $create_table_exe -x c -
+
+       $create_table_exe
+
+       rm -f $create_table_exe
+}
+
+create_table()
+{
+       echo "static const char *syscalltbl_arm64[] = {"
+       create_table_from_c
+       echo "};"
+}
+
+$gcc -E -dM -x c  $input              \
+       |sed -ne 's/^#define __NR_//p' \
+       |sort -t' ' -k2 -nu            \
+       |create_table
index 3598b8b75d274c8ebcc6fc0452091dec34c797b7..7c6eeb4633fe139e49d9434d5bec4d03849c4712 100644 (file)
@@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
        }
 
        /*
-        * Check if return address is on the stack.
+        * Check if return address is on the stack. If the return
+        * address is still in a register (typically R0), it has not
+        * yet been saved on the stack.
         */
-       if (nops != 0 || ops != NULL)
+       if ((nops != 0 || ops != NULL) &&
+               !(nops == 1 && ops[0].atom == DW_OP_regx &&
+                       ops[0].number2 == 0 && ops[0].offset == 0))
                return 0;
 
        /*
@@ -243,10 +247,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
        u64 ip;
        u64 skip_slot = -1;
 
-       if (chain->nr < 3)
+       if (!chain || chain->nr < 3)
                return skip_slot;
 
-       ip = chain->ips[2];
+       ip = chain->ips[1];
 
        thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
 
index d233e2eb959223174b9d6808016778361b338b0b..aaabab5e28300d4120cdbf44c29c7d7b61563603 100644 (file)
@@ -102,7 +102,7 @@ const char * const kvm_skip_events[] = {
 
 int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
 {
-       if (strstr(cpuid, "IBM/S390")) {
+       if (strstr(cpuid, "IBM")) {
                kvm->exit_reasons = sie_exit_reasons;
                kvm->exit_reasons_isa = "SIE";
        } else
index 4dfe42666d0ce6e20214e70f0c2a6a3884106290..f0b1709a5ffb2b0901d7f2492252876d17bc25a0 100644 (file)
 330    common  pkey_alloc              __x64_sys_pkey_alloc
 331    common  pkey_free               __x64_sys_pkey_free
 332    common  statx                   __x64_sys_statx
+333    common  io_pgetevents           __x64_sys_io_pgetevents
+334    common  rseq                    __x64_sys_rseq
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 4b2caf6d48e794d3cd5a88aaa978db10aabb3185..fead6b3b4206e409fc4042ce5d850cae2629bae9 100644 (file)
@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
                else if (rm[2].rm_so != rm[2].rm_eo)
                        prefix[0] = '+';
                else
-                       strncpy(prefix, "+0", 2);
+                       scnprintf(prefix, sizeof(prefix), "+0");
        }
 
        /* Rename register */
index 63a74c32ddc5d4a00028e7d01b5377d3ef731285..e33ef5bc31c57f08aa40ac682b4ca8931aba7751 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <string.h>
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 #include "../../util/intel-pt.h"
index 06bae7023a5103f28b2734075752d871072b2172..950539f9a4f7778d50bdb4110c49a94d764d4596 100644 (file)
@@ -2,6 +2,7 @@
 #include <stdbool.h>
 #include <errno.h>
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 #include "../../perf.h"
index 60bf119430479272a80c17e4231de4633bad2b6e..eafce1a130a17f5ba883d429a76296d702c07f5b 100644 (file)
@@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o
 perf-y += futex-requeue.o
 perf-y += futex-lock-pi.o
 
+perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
 perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
 
index b43f8d2a34ec101c1cc517e145ed063566085e42..9ad015a1e20247a001e115284def3902e036bf67 100644 (file)
@@ -6,6 +6,7 @@
 #define altinstr_replacement text
 #define globl p2align 4; .globl
 #define _ASM_EXTABLE_FAULT(x, y)
+#define _ASM_EXTABLE(x, y)
 
 #include "../../arch/x86/lib/memcpy_64.S"
 /*
diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c
new file mode 100644 (file)
index 0000000..4130734
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * From code in arch/x86/lib/usercopy_64.c, copied to keep the tools/ copy
+ * of the kernel's arch/x86/lib/memcpy_64.S used in 'perf bench mem memcpy'
+ * happy.
+ */
+#include <linux/types.h>
+
+unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
+
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
+{
+       for (; len; --len, to++, from++) {
+               /*
+                * Call the assembly routine back directly since
+                * memcpy_mcsafe() may silently fall back to memcpy.
+                */
+               unsigned long rem = __memcpy_mcsafe(to, from, 1);
+
+               if (rem)
+                       break;
+       }
+       return len;
+}
index 63eb49082774c94dfbabe7a18db73bdc2403fb6a..44195514b19e65a5ee0287b48fa0ab25fa44d66f 100644 (file)
@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
-       u64 bytes_done;
+       u64 bytes_done, secs;
        long work_done;
        u32 l;
        struct rusage rusage;
@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
-       td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+       secs = td->runtime_ns / NSEC_PER_SEC;
+       td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
 
        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
index 5eb22cc563636c11d4e12bf42c71f00b3e1255db..8180319285af3377810c30c0298f37c73cb9bb8d 100644 (file)
@@ -283,6 +283,15 @@ out_put:
        return ret;
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 static int hist_entry__tty_annotate(struct hist_entry *he,
                                    struct perf_evsel *evsel,
                                    struct perf_annotate *ann)
@@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv)
                        .attr   = perf_event__process_attr,
                        .build_id = perf_event__process_build_id,
                        .tracing_data   = perf_event__process_tracing_data,
-                       .feature        = perf_event__process_feature,
+                       .feature        = process_feature_event,
                        .ordered_events = true,
                        .ordering_requires_timestamps = true,
                },
index 307b3594525f34cc9e14d758b71bcdf266414c3e..f3aa9d02a5ab5d3cc5a78fd15d9d483fdbd64907 100644 (file)
@@ -56,16 +56,16 @@ struct c2c_hist_entry {
 
        struct compute_stats     cstats;
 
+       unsigned long            paddr;
+       unsigned long            paddr_cnt;
+       bool                     paddr_zero;
+       char                    *nodestr;
+
        /*
         * must be at the end,
         * because of its callchain dynamic entry
         */
        struct hist_entry       he;
-
-       unsigned long            paddr;
-       unsigned long            paddr_cnt;
-       bool                     paddr_zero;
-       char                    *nodestr;
 };
 
 static char const *coalesce_default = "pid,iaddr";
@@ -2193,7 +2193,7 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
        fprintf(out, "%s\n", bf);
        fprintf(out, "  -------------------------------------------------------------\n");
 
-       hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, true);
+       hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
 }
 
 static void print_pareto(FILE *out)
@@ -2268,7 +2268,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
        fprintf(out, "=================================================\n");
        fprintf(out, "#\n");
 
-       hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, false);
+       hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, true);
 
        fprintf(out, "\n");
        fprintf(out, "=================================================\n");
@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
        " s             Toggle full length of symbol and source line columns \n"
        " q             Return back to cacheline list \n";
 
+       if (!he)
+               return 0;
+
        /* Display compact version first. */
        c2c.symbol_full = false;
 
index d660cb7b222b23bfa6646dfd1fd04d75ca55d337..39db2ee32d484703cc4ecc02a6df283cf6d7577b 100644 (file)
@@ -696,7 +696,7 @@ static void hists__process(struct hists *hists)
        hists__output_resort(hists, NULL);
 
        hists__fprintf(hists, !quiet, 0, 0, 0, stdout,
-                      symbol_conf.use_callchain);
+                      !symbol_conf.use_callchain);
 }
 
 static void data__fprintf(void)
index cdb5b694983273de734fa1f45848c49eeac239d2..02f7a3c27761f717b9e8c5cf39d4788b2014297b 100644 (file)
@@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool,
        }
 
        /*
-        * All features are received, we can force the
+        * (feat_id = HEADER_LAST_FEATURE) is the end marker which
+        * means all features are received; now we can force the
         * group if needed.
         */
        setup_forced_leader(rep, session->evlist);
@@ -477,8 +478,8 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 
                hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
                hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
-                              symbol_conf.use_callchain ||
-                              symbol_conf.show_branchflag_count);
+                              !(symbol_conf.use_callchain ||
+                                symbol_conf.show_branchflag_count));
                fprintf(stdout, "\n\n");
        }
 
index b3bf35512d2198a94e46a7ecaf6052bca616ee7d..568ddfac3213e084c1f4c6077cd73943bf0644b9 100644 (file)
@@ -180,6 +180,18 @@ static struct {
                                  PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
        },
 
+       [PERF_TYPE_HW_CACHE] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
+
        [PERF_TYPE_RAW] = {
                .user_set = false,
 
@@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        struct perf_evlist *evlist;
        struct perf_evsel *evsel, *pos;
        int err;
+       static struct perf_evsel_script *es;
 
        err = perf_event__process_attr(tool, event, pevlist);
        if (err)
@@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        evlist = *pevlist;
        evsel = perf_evlist__last(*pevlist);
 
+       if (!evsel->priv) {
+               if (scr->per_event_dump) {
+                       evsel->priv = perf_evsel_script__new(evsel,
+                                               scr->session->data);
+               } else {
+                       es = zalloc(sizeof(*es));
+                       if (!es)
+                               return -ENOMEM;
+                       es->fp = stdout;
+                       evsel->priv = es;
+               }
+       }
+
        if (evsel->attr.type >= PERF_TYPE_MAX &&
            evsel->attr.type != PERF_TYPE_SYNTH)
                return 0;
@@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
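
This wrapper mirrors the builtin-report change above: HEADER_LAST_FEATURE is only an end-of-stream marker, so it is swallowed here rather than handed to perf_event__process_feature() as if it were a real feature record.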
+
 #ifdef HAVE_AUXTRACE_SUPPORT
 static int perf_script__process_auxtrace_info(struct perf_tool *tool,
                                              union perf_event *event,
@@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv)
                        .attr            = process_attr,
                        .event_update   = perf_event__process_event_update,
                        .tracing_data    = perf_event__process_tracing_data,
-                       .feature         = perf_event__process_feature,
+                       .feature         = process_feature_event,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_script__process_auxtrace_info,
@@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv)
                     "+field to add and -field to remove."
                     "Valid types: hw,sw,trace,raw,synth. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
-                    "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
+                    "addr,symoff,srcline,period,iregs,uregs,brstack,"
+                    "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
+                    "callindent,insn,insnlen,synth,phys_addr,metric,misc",
                     parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
index 096ccb25c11ff7786c6df0c24b695cd0eff5bec0..d097b5b47eb81e16b83a8a37f3385aeea124a0ed 100644 (file)
@@ -65,6 +65,7 @@
 #include "util/tool.h"
 #include "util/string2.h"
 #include "util/metricgroup.h"
+#include "util/top.h"
 #include "asm/bug.h"
 
 #include <linux/time64.h>
@@ -144,6 +145,8 @@ static struct target target = {
 
 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
 
+#define METRIC_ONLY_LEN 20
+
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static volatile pid_t          child_pid                       = -1;
@@ -173,6 +176,7 @@ static struct cpu_map               *aggr_map;
 static aggr_get_id_t           aggr_get_id;
 static bool                    append_file;
 static bool                    interval_count;
+static bool                    interval_clear;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
@@ -180,6 +184,7 @@ static int                  print_mixed_hw_group_error;
 static u64                     *walltime_run;
 static bool                    ru_display                      = false;
 static struct rusage           ru_data;
+static unsigned int            metric_only_len                 = METRIC_ONLY_LEN;
 
 struct perf_stat {
        bool                     record;
@@ -291,18 +296,6 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
        return perf_evsel__open_per_thread(evsel, evsel_list->threads);
 }
 
-/*
- * Does the counter have nsecs as a unit?
- */
-static inline int nsec_counter(struct perf_evsel *evsel)
-{
-       if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
-           perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
-               return 1;
-
-       return 0;
-}
-
 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
@@ -967,8 +960,6 @@ static void print_metric_csv(void *ctx,
        fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
 }
 
-#define METRIC_ONLY_LEN 20
-
 /* Filter out some columns that don't work well in metrics only mode */
 
 static bool valid_only_metric(const char *unit)
@@ -999,22 +990,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt,
 {
        struct outstate *os = ctx;
        FILE *out = os->fh;
-       int n;
-       char buf[1024];
-       unsigned mlen = METRIC_ONLY_LEN;
+       char buf[1024], str[1024];
+       unsigned mlen = metric_only_len;
 
        if (!valid_only_metric(unit))
                return;
        unit = fixunit(buf, os->evsel, unit);
-       if (color)
-               n = color_fprintf(out, color, fmt, val);
-       else
-               n = fprintf(out, fmt, val);
-       if (n > METRIC_ONLY_LEN)
-               n = METRIC_ONLY_LEN;
        if (mlen < strlen(unit))
                mlen = strlen(unit) + 1;
-       fprintf(out, "%*s", mlen - n, "");
+
+       if (color)
+               mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+       color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+       fprintf(out, "%*s ", mlen, str);
 }
 
 static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
@@ -1054,35 +1043,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
        if (csv_output)
                fprintf(os->fh, "%s%s", unit, csv_sep);
        else
-               fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
-}
-
-static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
-{
-       FILE *output = stat_config.output;
-       double msecs = avg / NSEC_PER_MSEC;
-       const char *fmt_v, *fmt_n;
-       char name[25];
-
-       fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
-       fmt_n = csv_output ? "%s" : "%-25s";
-
-       aggr_printout(evsel, id, nr);
-
-       scnprintf(name, sizeof(name), "%s%s",
-                 perf_evsel__name(evsel), csv_output ? "" : " (msec)");
-
-       fprintf(output, fmt_v, msecs, csv_sep);
-
-       if (csv_output)
-               fprintf(output, "%s%s", evsel->unit, csv_sep);
-       else
-               fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);
-
-       fprintf(output, fmt_n, name);
-
-       if (evsel->cgrp)
-               fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
+               fprintf(os->fh, "%*s ", metric_only_len, unit);
 }
 
 static int first_shadow_cpu(struct perf_evsel *evsel, int id)
@@ -1240,11 +1201,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
                return;
        }
 
-       if (metric_only)
-               /* nothing */;
-       else if (nsec_counter(counter))
-               nsec_printout(id, nr, counter, uval);
-       else
+       if (!metric_only)
                abs_printout(id, nr, counter, uval);
 
        out.print_metric = pm;
@@ -1330,7 +1287,7 @@ static void collect_all_aliases(struct perf_evsel *counter,
                    alias->scale != counter->scale ||
                    alias->cgrp != counter->cgrp ||
                    strcmp(alias->unit, counter->unit) ||
-                   nsec_counter(alias) != nsec_counter(counter))
+                   perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
                        break;
                alias->merged_stat = true;
                cb(alias, data, false);
@@ -1704,9 +1661,12 @@ static void print_interval(char *prefix, struct timespec *ts)
        FILE *output = stat_config.output;
        static int num_print_interval;
 
+       if (interval_clear)
+               puts(CONSOLE_CLEAR);
+
        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-       if (num_print_interval == 0 && !csv_output) {
+       if ((num_print_interval == 0 && !csv_output) || interval_clear) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
                        fprintf(output, "#           time socket cpus");
@@ -1719,7 +1679,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                                fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
-                       fprintf(output, "#           time CPU");
+                       fprintf(output, "#           time CPU    ");
                        if (!metric_only)
                                fprintf(output, "                counts %*s events\n", unit_width, "unit");
                        break;
@@ -1738,7 +1698,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                }
        }
 
-       if (num_print_interval == 0 && metric_only)
+       if ((num_print_interval == 0 || interval_clear) && metric_only)
                print_metric_headers(" ", true);
        if (++num_print_interval == 25)
                num_print_interval = 0;
@@ -2057,6 +2017,8 @@ static const struct option stat_options[] = {
                    "(overhead is possible for values <= 100ms)"),
        OPT_INTEGER(0, "interval-count", &stat_config.times,
                    "print counts for fixed number of times"),
+       OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+                   "clear screen in between new interval"),
        OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
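
The flag pairs with interval mode, e.g. perf stat -I 1000 --interval-clear -a. The clearing itself is the puts(CONSOLE_CLEAR) in print_interval() above, with CONSOLE_CLEAR coming from util/top.h (the include added at the top of this file); its assumed definition is the usual "home cursor, erase display" escape:

    /* repaint from the top-left each interval, which is also why the
     * column headers are re-emitted whenever interval_clear is set */
    #define CONSOLE_CLEAR "\033[H\033[2J"
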
@@ -2436,13 +2398,24 @@ static int add_default_attributes(void)
        (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
 };
+       struct parse_events_error errinfo;
 
        /* Set attrs if no event is selected and !null_run: */
        if (null_run)
                return 0;
 
        if (transaction_run) {
-               struct parse_events_error errinfo;
+               /* Handle -T as -M transaction. Once platform-specific metrics
+                * support has been added to the json files, all architectures
+                * will use this approach. To determine transaction support
+                * on an architecture, test for such a metric name.
+                */
+               if (metricgroup__has_metric("transaction")) {
+                       struct option opt = { .value = &evsel_list };
+
+                       return metricgroup__parse_groups(&opt, "transaction",
+                                                        &metric_events);
+               }
 
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
@@ -2454,6 +2427,7 @@ static int add_default_attributes(void)
                                           &errinfo);
                if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
+                       parse_events_print_error(&errinfo, transaction_attrs);
                        return -1;
                }
                return 0;
@@ -2479,10 +2453,11 @@ static int add_default_attributes(void)
                    pmu_have_event("msr", "smi")) {
                        if (!force_metric_only)
                                metric_only = true;
-                       err = parse_events(evsel_list, smi_cost_attrs, NULL);
+                       err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
                } else {
                        fprintf(stderr, "To measure SMI cost, it needs "
                                "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+                       parse_events_print_error(&errinfo, smi_cost_attrs);
                        return -1;
                }
                if (err) {
@@ -2517,12 +2492,13 @@ static int add_default_attributes(void)
                if (topdown_attrs[0] && str) {
                        if (warn)
                                arch_topdown_group_warn();
-                       err = parse_events(evsel_list, str, NULL);
+                       err = parse_events(evsel_list, str, &errinfo);
                        if (err) {
                                fprintf(stderr,
                                        "Cannot set up top down events %s: %d\n",
                                        str, err);
                                free(str);
+                               parse_events_print_error(&errinfo, str);
                                return -1;
                        }
                } else {
index ffdc2769ff9f757b0a9d650c573aa06e800270a8..d21d8751e74910db9639f0b3c450abf42eb223ab 100644 (file)
@@ -307,7 +307,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
        hists__fprintf(hists, false, top->print_entries - printed, win_width,
-                      top->min_percent, stdout, symbol_conf.use_callchain);
+                      top->min_percent, stdout, !symbol_conf.use_callchain);
 }
 
 static void prompt_integer(int *target, const char *msg)
index 6a748eca2edb4231a2517ae3d12e2023ca6bd076..88561eed79505f737570aeacd10cd0ec0b8db0b8 100644 (file)
@@ -291,7 +291,7 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha
 {
        int idx = val - sa->offset;
 
-       if (idx < 0 || idx >= sa->nr_entries)
+       if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
                return scnprintf(bf, size, intfmt, val);
 
        return scnprintf(bf, size, "%s", sa->entries[idx]);
@@ -761,10 +761,12 @@ static struct syscall_fmt {
          .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
        { .name     = "socket",
          .arg = { [0] = STRARRAY(family, socket_families),
-                  [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+                  [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+                  [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
        { .name     = "socketpair",
          .arg = { [0] = STRARRAY(family, socket_families),
-                  [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+                  [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+                  [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
        { .name     = "stat", .alias = "newstat", },
        { .name     = "statx",
          .arg = { [0] = { .scnprintf = SCA_FDAT,        /* fdat */ },
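
With the third argument wired up, perf trace can decode the protocol argument of socket() and socketpair() symbolically rather than as a raw integer; SCA_SK_PROTO presumably draws its names from the IPPROTO_* values in include/uapi/linux/in.h, which the check-headers change below starts tracking.
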
@@ -2990,6 +2992,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
 
                if (trace__validate_ev_qualifier(trace))
                        goto out;
+               trace->trace_syscalls = true;
        }
 
        err = 0;
@@ -3045,7 +3048,7 @@ int cmd_trace(int argc, const char **argv)
                },
                .output = stderr,
                .show_comm = true,
-               .trace_syscalls = true,
+               .trace_syscalls = false,
                .kernel_syscallchains = false,
                .max_stack = UINT_MAX,
        };
@@ -3191,13 +3194,7 @@ int cmd_trace(int argc, const char **argv)
 
        if (!trace.trace_syscalls && !trace.trace_pgfaults &&
            trace.evlist->nr_entries == 0 /* Was --events used? */) {
-               pr_err("Please specify something to trace.\n");
-               return -1;
-       }
-
-       if (!trace.trace_syscalls && trace.ev_qualifier) {
-               pr_err("The -e option can't be used with --no-syscalls.\n");
-               goto out;
+               trace.trace_syscalls = true;
        }
 
        if (output_name != NULL) {
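
The net effect of these two hunks: syscall tracing is no longer unconditionally on. It is enabled when -e names syscalls (first hunk in this file) and re-enabled as the default when nothing else was selected, so a bare perf trace still behaves strace-like while perf trace -e sched:sched_switch traces just that tracepoint; the old restriction that -e and --no-syscalls were mutually exclusive disappears with it.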
index 10f333e2e82507977e08c2662b8ef62d2450d20e..de28466c0186bf78652c99926c5449eaa5dc6a0b 100755 (executable)
@@ -7,6 +7,7 @@ include/uapi/drm/i915_drm.h
 include/uapi/linux/fcntl.h
 include/uapi/linux/kcmp.h
 include/uapi/linux/kvm.h
+include/uapi/linux/in.h
 include/uapi/linux/perf_event.h
 include/uapi/linux/prctl.h
 include/uapi/linux/sched.h
@@ -35,6 +36,7 @@ arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/unistd.h
 arch/alpha/include/uapi/asm/errno.h
 arch/mips/include/asm/errno.h
 arch/mips/include/uapi/asm/errno.h
@@ -53,6 +55,7 @@ include/uapi/asm-generic/errno.h
 include/uapi/asm-generic/errno-base.h
 include/uapi/asm-generic/ioctls.h
 include/uapi/asm-generic/mman-common.h
+include/uapi/asm-generic/unistd.h
 '
 
 check_2 () {
index dd764ad5efdfc70481bc8a91466d37f2527fb2bb..a63aa6241b7f36dd4f21f46e2cae350e6823f791 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #ifndef _PERF_BPF_H
 #define _PERF_BPF_H
+
+#include <uapi/linux/bpf.h>
+
 #define SEC(NAME) __attribute__((section(NAME),  used))
 
 #define probe(function, vars) \
index 0c6d1002b524eaf62ef62cc32763b041b2f33ba1..ac1bcdc17dae7554f51a780b843605c441c6abbf 100644 (file)
@@ -35,6 +35,7 @@
 #include <sys/mman.h>
 #include <syscall.h> /* for gettid() */
 #include <err.h>
+#include <linux/kernel.h>
 
 #include "jvmti_agent.h"
 #include "../util/jitdump.h"
@@ -249,7 +250,7 @@ void *jvmti_open(void)
        /*
         * jitdump file name
         */
-       snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
 
        fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
        if (fd == -1)
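
snprintf() returns the length the output would have had, which may exceed the buffer and is what newer compilers' truncation warnings key on; scnprintf(), available through the linux/kernel.h include added above, returns the number of bytes actually stored. A standalone contrast (values in the comments assume the 8-byte buffer shown):

    char buf[8];
    /* would-be length, may exceed the buffer: returns 13 */
    int n1 = snprintf(buf, sizeof(buf), "%s", "a long string");
    /* bytes actually written, excluding the NUL: returns 7 */
    int n2 = scnprintf(buf, sizeof(buf), "%s", "a long string");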
index a1a97956136f966a8f7240f4838e131232a93da8..21bf7f5a3cf51a1a42e3169daa738c8e7e0a8d83 100644 (file)
@@ -5,6 +5,7 @@
 #include <time.h>
 #include <stdbool.h>
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 extern bool test_attr__enabled;
@@ -24,7 +25,9 @@ static inline unsigned long long rdclock(void)
        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }
 
+#ifndef MAX_NR_CPUS
 #define MAX_NR_CPUS                    1024
+#endif
 
 extern const char *input_name;
 extern bool perf_host, perf_guest;
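
The #ifndef guard lets a build override the CPU cap without patching the header, e.g. something along the lines of make -C tools/perf EXTRA_CFLAGS=-DMAX_NR_CPUS=4096 (the EXTRA_CFLAGS spelling is the usual perf build hook and an assumption here, not part of this patch).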
index 17783913d3306a15c13d45745f447aa5ebafb487..215ba30b85343ad1874b1fc52c05fccbd8948bb3 100644 (file)
@@ -1,7 +1,7 @@
 hostprogs := jevents
 
 jevents-y      += json.o jsmn.o jevents.o
-CHOSTFLAGS_jevents.o   = -I$(srctree)/tools/include
+HOSTCFLAGS_jevents.o   = -I$(srctree)/tools/include
 pmu-events-y   += pmu-events.o
 JDIR           =  pmu-events/arch/$(SRCARCH)
 JSON           =  $(shell [ -d $(JDIR) ] &&                            \
index bc03c06c391842ba531bedca43b76fdcefca6707..752e47eb6977193a46345daca8faa6147d3600d0 100644 (file)
     {
         "ArchStdEvent": "L1D_CACHE_REFILL_WR",
     },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_INVAL",
+    },
     {
         "ArchStdEvent": "L1D_TLB_REFILL_RD",
     },
     {
         "ArchStdEvent": "L1D_TLB_WR",
     },
+    {
+        "ArchStdEvent": "L2D_TLB_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_RD",
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_WR",
+    },
     {
         "ArchStdEvent": "BUS_ACCESS_RD",
-   },
-   {
+    },
+    {
         "ArchStdEvent": "BUS_ACCESS_WR",
-   }
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_RD",
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_WR",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_LD_SPEC",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_ST_SPEC",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+    },
+    {
+        "ArchStdEvent": "EXC_UNDEF",
+    },
+    {
+        "ArchStdEvent": "EXC_SVC",
+    },
+    {
+        "ArchStdEvent": "EXC_PABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_DABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_IRQ",
+    },
+    {
+        "ArchStdEvent": "EXC_FIQ",
+    },
+    {
+        "ArchStdEvent": "EXC_SMC",
+    },
+    {
+        "ArchStdEvent": "EXC_HVC",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_PABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_DABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_OTHER",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_IRQ",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_FIQ",
+    }
 ]
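
The "ArchStdEvent" entries are references into the shared ARMv8 recommended-events list rather than full definitions: jevents fills in the event code and descriptions from the common armv8 JSON, so this per-core file only names which recommended events the core implements.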
index 8bf16759ca531b6fb1418ad873957e29cfdcc516..2dd8dafff2efd94f9fb981179ad96680b06ff51b 100644 (file)
@@ -1,71 +1,83 @@
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
                "BriefDescription": "CPU Cycles",
                "PublicDescription": "Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
                "BriefDescription": "Instructions",
                "PublicDescription": "Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
                "BriefDescription": "L1I Directory Writes",
                "PublicDescription": "Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
                "BriefDescription": "L1I Penalty Cycles",
                "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
                "BriefDescription": "L1D Directory Writes",
                "PublicDescription": "Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
                "BriefDescription": "L1D Penalty Cycles",
                "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
                "BriefDescription": "Problem-State CPU Cycles",
                "PublicDescription": "Problem-State Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
                "BriefDescription": "Problem-State Instructions",
                "PublicDescription": "Problem-State Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
                "BriefDescription": "Problem-State L1I Directory Writes",
                "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1I Penalty Cycles",
                "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
                "BriefDescription": "Problem-State L1D Directory Writes",
                "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1D Penalty Cycles",
index 7e5b72492141e036ad5f58e9defe44d855a0b372..db286f19e7b60576fb0937b5d3c89bf482b8cfda 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
                "BriefDescription": "PRNG Functions",
                "PublicDescription": "Total number of the PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
                "BriefDescription": "PRNG Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
                "BriefDescription": "PRNG Blocked Functions",
                "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
                "BriefDescription": "PRNG Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
                "BriefDescription": "SHA Functions",
                "PublicDescription": "Total number of SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
                "BriefDescription": "SHA Cycles",
                "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
                "BriefDescription": "SHA Blocked Functions",
                "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
                "BriefDescription": "SHA Bloced Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
                "BriefDescription": "DEA Functions",
                "PublicDescription": "Total number of the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
                "BriefDescription": "DEA Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
                "BriefDescription": "DEA Blocked Functions",
                "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
                "BriefDescription": "DEA Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
                "BriefDescription": "AES Functions",
                "PublicDescription": "Total number of AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
                "BriefDescription": "AES Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
                "BriefDescription": "AES Blocked Functions",
                "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
                "BriefDescription": "AES Blocked Cycles",
index 0feedb40f30f46f0712c41c853965898af3174d2..b6b7f29ca831c6463920887c64ad589cbd41ffca 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "128",
                "EventName": "L1I_L2_SOURCED_WRITES",
                "BriefDescription": "L1I L2 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "L1D_L2_SOURCED_WRITES",
                "BriefDescription": "L1D L2 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "L1I_L3_LOCAL_WRITES",
                "BriefDescription": "L1I L3 Local Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "L1D_L3_LOCAL_WRITES",
                "BriefDescription": "L1D L3 Local Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "L1I_L3_REMOTE_WRITES",
                "BriefDescription": "L1I L3 Remote Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L3_REMOTE_WRITES",
                "BriefDescription": "L1D L3 Remote Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1I_CACHELINE_INVALIDATES",
                "BriefDescription": "L1I Cacheline Invalidates",
                "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
                "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
                "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "DTLB1_MISSES",
                "BriefDescription": "DTLB1 Misses",
                "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L2C_STORES_SENT",
                "BriefDescription": "L2C Stores Sent",
index 8bf16759ca531b6fb1418ad873957e29cfdcc516..2dd8dafff2efd94f9fb981179ad96680b06ff51b 100644 (file)
@@ -1,71 +1,83 @@
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
                "BriefDescription": "CPU Cycles",
                "PublicDescription": "Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
                "BriefDescription": "Instructions",
                "PublicDescription": "Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
                "BriefDescription": "L1I Directory Writes",
                "PublicDescription": "Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
                "BriefDescription": "L1I Penalty Cycles",
                "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
                "BriefDescription": "L1D Directory Writes",
                "PublicDescription": "Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
                "BriefDescription": "L1D Penalty Cycles",
                "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
                "BriefDescription": "Problem-State CPU Cycles",
                "PublicDescription": "Problem-State Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
                "BriefDescription": "Problem-State Instructions",
                "PublicDescription": "Problem-State Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
                "BriefDescription": "Problem-State L1I Directory Writes",
                "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1I Penalty Cycles",
                "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
                "BriefDescription": "Problem-State L1D Directory Writes",
                "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1D Penalty Cycles",
index 7e5b72492141e036ad5f58e9defe44d855a0b372..db286f19e7b60576fb0937b5d3c89bf482b8cfda 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
                "BriefDescription": "PRNG Functions",
                "PublicDescription": "Total number of the PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
                "BriefDescription": "PRNG Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
                "BriefDescription": "PRNG Blocked Functions",
                "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
                "BriefDescription": "PRNG Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
                "BriefDescription": "SHA Functions",
                "PublicDescription": "Total number of SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
                "BriefDescription": "SHA Cycles",
                "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
                "BriefDescription": "SHA Blocked Functions",
                "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
                "BriefDescription": "SHA Bloced Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
                "BriefDescription": "DEA Functions",
                "PublicDescription": "Total number of the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
                "BriefDescription": "DEA Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
                "BriefDescription": "DEA Blocked Functions",
                "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
                "BriefDescription": "DEA Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
                "BriefDescription": "AES Functions",
                "PublicDescription": "Total number of AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
                "BriefDescription": "AES Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
                "BriefDescription": "AES Blocked Functions",
                "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
                "BriefDescription": "AES Blocked Cycles",
index 9a002b6967f1d468fa224bbf11504b6fb01b9446..436ce33f1182e7d1721e760016f3b21c442df4ba 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "DTLB1_MISSES",
                "BriefDescription": "DTLB1 Misses",
                "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "DTLB1_GPAGE_WRITES",
                "BriefDescription": "DTLB1 Two-Gigabyte Page Writes",
                "PublicDescription": "Counter:132       Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
                "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1C_TLB1_MISSES",
                "BriefDescription": "L1C TLB1 Misses",
                "PublicDescription": "Increments by one for any cycle where a Level-1 cache or Level-1 TLB miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "159",
                "EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "160",
                "EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "161",
                "EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "162",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "163",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "164",
                "EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "165",
                "EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "166",
                "EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Node L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "167",
                "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "168",
                "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "169",
                "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "170",
                "EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "171",
                "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "172",
                "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "173",
                "EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "174",
                "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "175",
                "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "176",
                "EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Node Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "177",
                "EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "178",
                "EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "179",
                "EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "218",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
                "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "219",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
                "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "220",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
                "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "448",
                "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
                "BriefDescription": "Cycle count with one thread active",
                "PublicDescription": "Cycle count with one thread active"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "449",
                "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
                "BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
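
The new "transaction" metric is plain counter arithmetic over the five TX_* events defined in extended.json: every completed TEND plus every abort, constrained or not. perf evaluates the MetricExpr itself (e.g. "perf stat -M transaction"), but as a minimal C sketch of the same computation — the helper name and the way the counter values are obtained are illustrative assumptions, not part of the patch:

static unsigned long long transaction_metric(unsigned long long tx_c_tend,
                                             unsigned long long tx_nc_tend,
                                             unsigned long long tx_nc_tabort,
                                             unsigned long long tx_c_tabort_special,
                                             unsigned long long tx_c_tabort_no_special)
{
        /* Mirrors the MetricExpr: completed transactions (constrained and
         * non-constrained TEND) plus all aborted ones. */
        return tx_c_tend + tx_nc_tend + tx_nc_tabort +
               tx_c_tabort_special + tx_c_tabort_no_special;
}
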
index 8f653c9d899d96aa320c9989d2e35dae2384c169..17fb5241928be2dc9061d28364a3eb0413e8f491 100644 (file)
@@ -1,47 +1,55 @@
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
                "BriefDescription": "CPU Cycles",
                "PublicDescription": "Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
                "BriefDescription": "Instructions",
                "PublicDescription": "Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
                "BriefDescription": "L1I Directory Writes",
                "PublicDescription": "Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
                "BriefDescription": "L1I Penalty Cycles",
                "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
                "BriefDescription": "L1D Directory Writes",
                "PublicDescription": "Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
                "BriefDescription": "L1D Penalty Cycles",
                "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
                "BriefDescription": "Problem-State CPU Cycles",
                "PublicDescription": "Problem-State Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
                "BriefDescription": "Problem-State Instructions",
index 7e5b72492141e036ad5f58e9defe44d855a0b372..db286f19e7b60576fb0937b5d3c89bf482b8cfda 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
                "BriefDescription": "PRNG Functions",
                "PublicDescription": "Total number of the PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
                "BriefDescription": "PRNG Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
                "BriefDescription": "PRNG Blocked Functions",
                "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
                "BriefDescription": "PRNG Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
                "BriefDescription": "SHA Functions",
                "PublicDescription": "Total number of SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
                "BriefDescription": "SHA Cycles",
                "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
                "BriefDescription": "SHA Blocked Functions",
                "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
                "BriefDescription": "SHA Bloced Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
                "BriefDescription": "DEA Functions",
                "PublicDescription": "Total number of the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
                "BriefDescription": "DEA Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
                "BriefDescription": "DEA Blocked Functions",
                "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
                "BriefDescription": "DEA Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
                "BriefDescription": "AES Functions",
                "PublicDescription": "Total number of AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
                "BriefDescription": "AES Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
                "BriefDescription": "AES Blocked Functions",
                "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
                "BriefDescription": "AES Blocked Cycles",
index aa4dfb46b65ba485e34ce563db6e0107cff51f86..e7a3524b748f01152e8a503392048966601a5540 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
                "PublicDescription": "Counter:128       Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "DTLB2_WRITES",
                "BriefDescription": "DTLB2 Writes",
                "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "DTLB2_MISSES",
                "BriefDescription": "DTLB2 Misses",
                "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "DTLB2_HPAGE_WRITES",
                "BriefDescription": "DTLB2 One-Megabyte Page Writes",
                "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "DTLB2_GPAGE_WRITES",
                "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
                "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "ITLB2_WRITES",
                "BriefDescription": "ITLB2 Writes",
                "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "ITLB2_MISSES",
                "BriefDescription": "ITLB2 Misses",
                "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
                "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
                "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "TLB2_ENGINES_BUSY",
                "BriefDescription": "TLB2 Engines Busy",
                "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1C_TLB2_MISSES",
                "BriefDescription": "L1C TLB2 Misses",
                "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "162",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "163",
                "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "164",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "165",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "166",
                "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "167",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "168",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "169",
                "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "170",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "171",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "172",
                "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "173",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "174",
                "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "175",
                "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "224",
                "EventName": "BCD_DFP_EXECUTION_SLOTS",
                "BriefDescription": "BCD DFP Execution Slots",
                "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "225",
                "EventName": "VX_BCD_EXECUTION_SLOTS",
                "BriefDescription": "VX BCD Execution Slots",
                "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "226",
                "EventName": "DECIMAL_INSTRUCTIONS",
                "BriefDescription": "Decimal Instructions",
                "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "232",
                "EventName": "LAST_HOST_TRANSLATIONS",
                "BriefDescription": "Last host translation done",
                "PublicDescription": "Last Host Translation done"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "243",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
                "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "244",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
                "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "245",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
                "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "448",
                "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
                "BriefDescription": "Cycle count with one thread active",
                "PublicDescription": "Cycle count with one thread active"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "449",
                "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
                "BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
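
cf_z14 gains the same "transaction" metric. The "Unit": "CPU-M-CF" lines added throughout these files are what let perf resolve each EventCode to the right PMU: on s390 the CPU Measurement Facility counters are exposed through the cpum_cf event source. A minimal sketch of what that mapping means at the syscall level — the sysfs path and the choice of EventCode 0 (CPU_CYCLES) are assumptions for illustration, not something this patch adds:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long count;
        unsigned int type;
        FILE *f;
        int fd;

        /* The Unit field names the PMU; perf looks up its dynamic type id
         * in sysfs, which for CPU-M-CF is the cpum_cf device. */
        f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
        if (!f)
                return 1;
        if (fscanf(f, "%u", &type) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 0;        /* EventCode 0: CPU_CYCLES */
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        sleep(1);
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("CPU_CYCLES: %llu\n", count);
        close(fd);
        return 0;
}
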
index 8bf16759ca531b6fb1418ad873957e29cfdcc516..2dd8dafff2efd94f9fb981179ad96680b06ff51b 100644 (file)
@@ -1,71 +1,83 @@
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
                "BriefDescription": "CPU Cycles",
                "PublicDescription": "Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
                "BriefDescription": "Instructions",
                "PublicDescription": "Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
                "BriefDescription": "L1I Directory Writes",
                "PublicDescription": "Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
                "BriefDescription": "L1I Penalty Cycles",
                "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
                "BriefDescription": "L1D Directory Writes",
                "PublicDescription": "Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
                "BriefDescription": "L1D Penalty Cycles",
                "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
                "BriefDescription": "Problem-State CPU Cycles",
                "PublicDescription": "Problem-State Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
                "BriefDescription": "Problem-State Instructions",
                "PublicDescription": "Problem-State Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
                "BriefDescription": "Problem-State L1I Directory Writes",
                "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1I Penalty Cycles",
                "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
                "BriefDescription": "Problem-State L1D Directory Writes",
                "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1D Penalty Cycles",
index 7e5b72492141e036ad5f58e9defe44d855a0b372..db286f19e7b60576fb0937b5d3c89bf482b8cfda 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
                "BriefDescription": "PRNG Functions",
                "PublicDescription": "Total number of the PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
                "BriefDescription": "PRNG Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
                "BriefDescription": "PRNG Blocked Functions",
                "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
                "BriefDescription": "PRNG Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
                "BriefDescription": "SHA Functions",
                "PublicDescription": "Total number of SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
                "BriefDescription": "SHA Cycles",
                "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
                "BriefDescription": "SHA Blocked Functions",
                "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
                "BriefDescription": "SHA Bloced Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
                "BriefDescription": "DEA Functions",
                "PublicDescription": "Total number of the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
                "BriefDescription": "DEA Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
                "BriefDescription": "DEA Blocked Functions",
                "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
                "BriefDescription": "DEA Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
                "BriefDescription": "AES Functions",
                "PublicDescription": "Total number of AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
                "BriefDescription": "AES Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
                "BriefDescription": "AES Blocked Functions",
                "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
                "BriefDescription": "AES Blocked Cycles",
index b6d7fec7c2e7c802382966543cabdda0b9e3ceb8..b7b42a870bb0c65a6a9bd54a7f3f9f63740db4e2 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "128",
                "EventName": "L1D_L2_SOURCED_WRITES",
                "BriefDescription": "L1D L2 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "L1I_L2_SOURCED_WRITES",
                "BriefDescription": "L1I L2 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "DTLB1_MISSES",
                "BriefDescription": "DTLB1 Misses",
                "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
                "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L2C_STORES_SENT",
                "BriefDescription": "L2C Stores Sent",
                "PublicDescription": "Incremented by one for every store sent to Level-2 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes",
index 8bf16759ca531b6fb1418ad873957e29cfdcc516..2dd8dafff2efd94f9fb981179ad96680b06ff51b 100644 (file)
@@ -1,71 +1,83 @@
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
                "BriefDescription": "CPU Cycles",
                "PublicDescription": "Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
                "BriefDescription": "Instructions",
                "PublicDescription": "Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
                "BriefDescription": "L1I Directory Writes",
                "PublicDescription": "Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
                "BriefDescription": "L1I Penalty Cycles",
                "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
                "BriefDescription": "L1D Directory Writes",
                "PublicDescription": "Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
                "BriefDescription": "L1D Penalty Cycles",
                "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
                "BriefDescription": "Problem-State CPU Cycles",
                "PublicDescription": "Problem-State Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
                "BriefDescription": "Problem-State Instructions",
                "PublicDescription": "Problem-State Instruction Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
                "BriefDescription": "Problem-State L1I Directory Writes",
                "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1I Penalty Cycles",
                "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
                "BriefDescription": "Problem-State L1D Directory Writes",
                "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
                "BriefDescription": "Problem-State L1D Penalty Cycles",
index 7e5b72492141e036ad5f58e9defe44d855a0b372..db286f19e7b60576fb0937b5d3c89bf482b8cfda 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
                "BriefDescription": "PRNG Functions",
                "PublicDescription": "Total number of the PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
                "BriefDescription": "PRNG Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
                "BriefDescription": "PRNG Blocked Functions",
                "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
                "BriefDescription": "PRNG Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
                "BriefDescription": "SHA Functions",
                "PublicDescription": "Total number of SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
                "BriefDescription": "SHA Cycles",
                "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
                "BriefDescription": "SHA Blocked Functions",
                "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
                "BriefDescription": "SHA Bloced Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
                "BriefDescription": "DEA Functions",
                "PublicDescription": "Total number of the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
                "BriefDescription": "DEA Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
                "BriefDescription": "DEA Blocked Functions",
                "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
                "BriefDescription": "DEA Blocked Cycles",
                "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
                "BriefDescription": "AES Functions",
                "PublicDescription": "Total number of AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
                "BriefDescription": "AES Cycles",
                "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
                "BriefDescription": "AES Blocked Functions",
                "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
                "BriefDescription": "AES Blocked Cycles",
index 8682126aabb2e0e3b858d5d5ef5a301dfbcbcf26..162251037219841a360e05149709d44e39239953 100644 (file)
 [
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "128",
                "EventName": "DTLB1_MISSES",
                "BriefDescription": "DTLB1 Misses",
                "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
                "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "L1D_L2I_SOURCED_WRITES",
                "BriefDescription": "L1D L2I Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
                "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
                "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
                "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L4 Sourced Writes",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
                "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "159",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "160",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "161",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
                "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "177",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
                "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "178",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
                "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
        },
        {
+               "Unit": "CPU-M-CF",
                "EventCode": "179",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
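
The new transaction.json entry defines a derived metric rather than a raw counter: perf's metric engine evaluates the MetricExpr over the named events when all of them are counted together. As a rough illustration only (this helper is hypothetical, not perf code), the expression is a plain sum of completed transaction ends and all abort flavours:

/* Hypothetical helper mirroring the MetricExpr above: the "transaction"
 * metric adds completed TENDs (constrained and non-constrained) to the
 * three abort counts. */
static double transaction_metric(double tx_c_tend, double tx_nc_tend,
				 double tx_nc_tabort,
				 double tx_c_tabort_special,
				 double tx_c_tabort_no_special)
{
	return tx_c_tend + tx_nc_tend + tx_nc_tabort +
	       tx_c_tabort_special + tx_c_tabort_no_special;
}

On a machine whose PMU exposes these events, something like "perf stat -M transaction" would then report the combined count.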
index db3a594ee1e4208dfb86b82efd3481d99d4d2825..68c92bb599eef708228287e0d1c6728429a67ced 100644 (file)
@@ -233,6 +233,8 @@ static struct map {
        { "QPI LL", "uncore_qpi" },
        { "SBO", "uncore_sbox" },
        { "iMPH-U", "uncore_arb" },
+       { "CPU-M-CF", "cpum_cf" },
+       { "CPU-M-SF", "cpum_sf" },
        {}
 };
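
These two rows extend perf's JSON-to-kernel PMU translation table: an event whose "Unit" is "CPU-M-CF" or "CPU-M-SF" (as in all the s390 entries added above) is attached to the kernel's cpum_cf or cpum_sf PMU. A minimal sketch of how such a NULL-terminated table can be consulted (the struct layout and function name here are assumptions for illustration, not the actual perf symbols):

#include <strings.h>	/* strcasecmp */

struct map { const char *unit; const char *pmu; };	/* assumed field names */

static const char *unit_to_pmu_name(const struct map *tbl, const char *unit)
{
	for (; tbl->unit; tbl++)	/* the trailing {} entry ends the table */
		if (!strcasecmp(tbl->unit, unit))
			return tbl->pmu;
	return NULL;	/* unknown unit: caller falls back to the raw name */
}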
 
index 38dfb720fb6f78757dfe3e7aa8d01014ffdc7927..54ace2f6bc3650388ec1f7e96c7c0b1ea2495232 100644 (file)
@@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value):
     string = ""
 
     if flag_fields[event_name][field_name]:
-       print_delim = 0
-        keys = flag_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        print_delim = 0
+        for idx in sorted(flag_fields[event_name][field_name]['values']):
             if not value and not idx:
                 string += flag_fields[event_name][field_name]['values'][idx]
                 break
@@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value):
     string = ""
 
     if symbolic_fields[event_name][field_name]:
-        keys = symbolic_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        for idx in sorted(symbolic_fields[event_name][field_name]['values']):
             if not value and not idx:
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
-           if (value == idx):
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+            if (value == idx):
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
 
     return string
@@ -74,19 +70,17 @@ def trace_flag_str(value):
     string = ""
     print_delim = 0
 
-    keys = trace_flags.keys()
-
-    for idx in keys:
-       if not value and not idx:
-           string += "NONE"
-           break
-
-       if idx and (value & idx) == idx:
-           if print_delim:
-               string += " | ";
-           string += trace_flags[idx]
-           print_delim = 1
-           value &= ~idx
+    for idx in trace_flags:
+        if not value and not idx:
+            string += "NONE"
+            break
+
+        if idx and (value & idx) == idx:
+            if print_delim:
+                string += " | "
+            string += trace_flags[idx]
+            print_delim = 1
+            value &= ~idx
 
     return string
 
index 81a56cd2b3c166315bfb376d93265bcdf0463e66..21a7a129809443a9231019d955fb5d945278cd46 100755 (executable)
@@ -8,6 +8,7 @@
 # PerfEvent is the base class for all perf event sample, PebsEvent
 # is a HW base Intel x86 PEBS event, and user could add more SW/HW
 # event classes based on requirements.
+from __future__ import print_function
 
 import struct
 
@@ -44,7 +45,8 @@ class PerfEvent(object):
                 PerfEvent.event_num += 1
 
         def show(self):
-                print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+                print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+                      (self.name, self.symbol, self.comm, self.dso))
 
 #
 # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
index fdd92f699055713e2d1fec1c99a61489e5812a64..cac7b2542ee8c99b814b9a0baddeb60ba6727c0b 100644 (file)
@@ -11,7 +11,7 @@
 try:
        import wx
 except ImportError:
-       raise ImportError, "You need to install the wxpython lib for this script"
+       raise ImportError("You need to install the wxpython lib for this script")
 
 
 class RootFrame(wx.Frame):
index f6c84966e4f89cb2ffcd0ad8d4b94a34d134811c..7384dcb628c4326c3d98dca18639b8439fc5c8ac 100644 (file)
@@ -5,6 +5,7 @@
 # This software may be distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
+from __future__ import print_function
 
 import errno, os
 
@@ -33,7 +34,7 @@ def nsecs_str(nsecs):
     return str
 
 def add_stats(dict, key, value):
-       if not dict.has_key(key):
+       if key not in dict:
                dict[key] = (value, value, value, 1)
        else:
                min, max, avg, count = dict[key]
@@ -72,10 +73,10 @@ try:
 except:
        if not audit_package_warned:
                audit_package_warned = True
-               print "Install the audit-libs-python package to get syscall names.\n" \
-                    "For example:\n  # apt-get install python-audit (Ubuntu)" \
-                    "\n  # yum install audit-libs-python (Fedora)" \
-                    "\n  etc.\n"
+               print("Install the audit-libs-python package to get syscall names.\n"
+                    "For example:\n  # apt-get install python-audit (Ubuntu)"
+                    "\n  # yum install audit-libs-python (Fedora)"
+                    "\n  etc.\n")
 
 def syscall_name(id):
        try:
index de66cb3b72c9e6be9dc5d884611e0522def92631..3473e7f66081c93104e951afb4553f5b59e03b6d 100644 (file)
@@ -9,13 +9,17 @@
 # This software is distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
-
+from __future__ import print_function
 
 import os
 import sys
 
 from collections import defaultdict
-from UserList import UserList
+try:
+    from UserList import UserList
+except ImportError:
+    # Python 3: UserList moved to the collections package
+    from collections import UserList
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -300,7 +304,7 @@ class TimeSliceList(UserList):
                if i == -1:
                        return
 
-               for i in xrange(i, len(self.data)):
+               for i in range(i, len(self.data)):
                        timeslice = self.data[i]
                        if timeslice.start > end:
                                return
@@ -336,8 +340,8 @@ class SchedEventProxy:
                on_cpu_task = self.current_tsk[headers.cpu]
 
                if on_cpu_task != -1 and on_cpu_task != prev_pid:
-                       print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
-                               (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+                       print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" %
+                               (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))
 
                threads[prev_pid] = prev_comm
                threads[next_pid] = next_comm
index 2bde505e2e7ea0c2b1157e2734be89d291351e66..d7a5e1b9aa6f5674f967297aade060e53536bdb8 100644 (file)
@@ -385,7 +385,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
        if (!t->subtest.get_nr)
                pr_debug("%s:", t->desc);
        else
-               pr_debug("%s subtest %d:", t->desc, subtest);
+               pr_debug("%s subtest %d:", t->desc, subtest + 1);
 
        switch (err) {
        case TEST_OK:
@@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size,
 
 #define for_each_shell_test(dir, base, ent)    \
        while ((ent = readdir(dir)) != NULL)    \
-               if (!is_directory(base, ent))
+               if (!is_directory(base, ent) && ent->d_name[0] != '.')
 
 static const char *shell_tests__dir(char *path, size_t size)
 {
index 7d40770684549d8691d63a403816b76b5bb7c3ad..3b97ac018d5aac5d6505924e508a5451888c6cd7 100644 (file)
@@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
        return 0;
 }
 
+static bool test__intel_pt_valid(void)
+{
+       return !!perf_pmu__find("intel_pt");
+}
+
 static int test__intel_pt(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1317,6 +1322,14 @@ static int test__intel_pt(struct perf_evlist *evlist)
        return 0;
 }
 
+static int test__checkevent_complex_name(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
+       return 0;
+}
+
 static int count_tracepoints(void)
 {
        struct dirent *events_ent;
@@ -1375,6 +1388,7 @@ struct evlist_test {
        const char *name;
        __u32 type;
        const int id;
+       bool (*valid)(void);
        int (*check)(struct perf_evlist *evlist);
 };
 
@@ -1648,9 +1662,15 @@ static struct evlist_test test__events[] = {
        },
        {
                .name  = "intel_pt//u",
+               .valid = test__intel_pt_valid,
                .check = test__intel_pt,
                .id    = 52,
        },
+       {
+               .name  = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
+               .check = test__checkevent_complex_name,
+               .id    = 53
+       }
 };
 
 static struct evlist_test test__events_pmu[] = {
@@ -1669,6 +1689,11 @@ static struct evlist_test test__events_pmu[] = {
                .check = test__checkevent_pmu_partial_time_callgraph,
                .id    = 2,
        },
+       {
+               .name  = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
+               .check = test__checkevent_complex_name,
+               .id    = 3,
+       }
 };
 
 struct terms_test {
@@ -1686,17 +1711,24 @@ static struct terms_test test__terms[] = {
 
 static int test_event(struct evlist_test *e)
 {
+       struct parse_events_error err = { .idx = 0, };
        struct perf_evlist *evlist;
        int ret;
 
+       if (e->valid && !e->valid()) {
+               pr_debug("... SKIP");
+               return 0;
+       }
+
        evlist = perf_evlist__new();
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name, NULL);
+       ret = parse_events(evlist, e->name, &err);
        if (ret) {
-               pr_debug("failed to parse event '%s', err %d\n",
-                        e->name, ret);
+               pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+                        e->name, ret, err.str);
+               parse_events_print_error(&err, e->name);
        } else {
                ret = e->check(evlist);
        }
@@ -1714,10 +1746,11 @@ static int test_events(struct evlist_test *events, unsigned cnt)
        for (i = 0; i < cnt; i++) {
                struct evlist_test *e = &events[i];
 
-               pr_debug("running test %d '%s'\n", e->id, e->name);
+               pr_debug("running test %d '%s'", e->id, e->name);
                ret1 = test_event(e);
                if (ret1)
                        ret2 = ret1;
+               pr_debug("\n");
        }
 
        return ret2;
@@ -1799,7 +1832,7 @@ static int test_pmu_events(void)
        }
 
        while (!ret && (ent = readdir(dir))) {
-               struct evlist_test e;
+               struct evlist_test e = { .id = 0, };
                char name[2 * NAME_MAX + 1 + 12 + 3];
 
                /* Names containing . are special and cannot be used directly */
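
The .valid hook threaded through this file gives tests a capability gate: a test that needs a particular PMU (here intel_pt) supplies a predicate, and test_event() reports a SKIP instead of a failure when the predicate says the PMU is absent. A reduced sketch of the pattern, where test_event_body() is a hypothetical stand-in for the parse-and-check body:

/* NULL ->valid means "always runnable"; a false return degrades the
 * test to a skip on machines that lack the required PMU. */
static int run_gated_test(struct evlist_test *e)
{
	if (e->valid && !e->valid()) {
		pr_debug("... SKIP");
		return 0;
	}
	return test_event_body(e);	/* hypothetical: parse_events() + e->check() */
}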
index 2630570396937fc4b26b844e5bfefc0e74d16409..3013ac8f83d0a996dd23328bf6fdb9ba8608695b 100755 (executable)
 libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
 nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 
+event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+
+add_libc_inet_pton_event() {
+
+       event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+                       grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+
+       if [ $? -ne 0 -o -z "$event_name" ] ; then
+               printf "FAIL: could not add event\n"
+               return 1
+       fi
+}
+
 trace_libc_inet_pton_backtrace() {
-       idx=0
-       expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
-       expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+
+       expected=`mktemp -u /tmp/expected.XXX`
+
+       echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected
+       echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
        case "$(uname -m)" in
        s390x)
                eventattr='call-graph=dwarf,max-stack=4'
-               expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+               ;;
+       ppc64|ppc64le)
+               eventattr='max-stack=4'
+               echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        *)
                eventattr='max-stack=3'
-               expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
-               expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        esac
 
-       file=`mktemp -u /tmp/perf.data.XXX`
+       perf_data=`mktemp -u /tmp/perf.data.XXX`
+       perf_script=`mktemp -u /tmp/perf.script.XXX`
+       perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+       perf script -i $perf_data > $perf_script
 
-       perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1
-       perf script -i $file | while read line ; do
+       exec 3<$perf_script
+       exec 4<$expected
+       while read line <&3 && read -r pattern <&4; do
+               [ -z "$pattern" ] && break
                echo $line
-               echo "$line" | egrep -q "${expected[$idx]}"
+               echo "$line" | egrep -q "$pattern"
                if [ $? -ne 0 ] ; then
-                       printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
-                       exit 1
+                       printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+                       return 1
                fi
-               let idx+=1
-               [ -z "${expected[$idx]}" ] && break
        done
 
        # If any statements are executed from this point onwards,
@@ -51,13 +75,20 @@ trace_libc_inet_pton_backtrace() {
        # even if the perf script output does not match.
 }
 
+delete_libc_inet_pton_event() {
+
+       if [ -n "$event_name" ] ; then
+               perf probe -q -d $event_name
+       fi
+}
+
 # Check for IPv6 interface existence
 ip a sh lo | fgrep -q inet6 || exit 2
 
 skip_if_no_perf_probe && \
-perf probe -q $libc inet_pton && \
+add_libc_inet_pton_event && \
 trace_libc_inet_pton_backtrace
 err=$?
-rm -f ${file}
-perf probe -q -d probe_libc:inet_pton
+rm -f ${perf_data} ${perf_script} ${expected}
+delete_libc_inet_pton_event
 exit $err
index 55ad9793d5443da34ee4c6c76ea5d7fcba80f6fd..4ce276efe6b4c1855e904a30a2e9efcb6ed01a4e 100755 (executable)
@@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2
 file=$(mktemp /tmp/temporary_file.XXXXX)
 
 trace_open_vfs_getname() {
-       evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+       evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
        perf trace -e $evts touch $file 2>&1 | \
        egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
 }
index 40e30a26b23cc260536977fb9a0b17db54aa207a..9497d02f69e6669d8ca19ed753beb1a8477f4006 100644 (file)
@@ -45,6 +45,7 @@ static int session_write_header(char *path)
 
        perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_NRCPUS);
+       perf_header__set_feat(&session->header, HEADER_ARCH);
 
        session->header.data_size += DATA_SIZE;
 
index 66330d4b739bc82976f25d9d6bdf4637872934a4..f528ba35e1409d23a3c916062c8348375b9e926a 100644 (file)
@@ -7,4 +7,5 @@ endif
 libperf-y += kcmp.o
 libperf-y += pkey_alloc.o
 libperf-y += prctl.o
+libperf-y += socket.o
 libperf-y += statx.o
index 984a504d335c412fdfae6806dd1eb5a6a843a963..9615af5d412b1a93622e130ad19a7b8ea325f4ce 100644 (file)
@@ -106,6 +106,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
 size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
 
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
+
 size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
 
index 2149d3a98e425607c5d95888cc65a291a5c95c17..9d3816815e60f48ff8ef8f6b77faf779a6d9bd1b 100755 (executable)
@@ -1,13 +1,14 @@
 #!/bin/sh
 
-drm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/
+
 printf "#ifndef DRM_COMMAND_BASE\n"
-grep "#define DRM_COMMAND_BASE" $drm_header_dir/drm.h
+grep "#define DRM_COMMAND_BASE" $header_dir/drm.h
 printf "#endif\n"
 
 printf "static const char *drm_ioctl_cmds[] = {\n"
-grep "^#define DRM_IOCTL.*DRM_IO" $drm_header_dir/drm.h | \
+grep "^#define DRM_IOCTL.*DRM_IO" $header_dir/drm.h | \
        sed -r 's/^#define +DRM_IOCTL_([A-Z0-9_]+)[      ]+DRM_IO[A-Z]* *\( *(0x[[:xdigit:]]+),*.*/     [\2] = "\1",/g'
-grep "^#define DRM_I915_[A-Z_0-9]\+[    ]\+0x" $drm_header_dir/i915_drm.h | \
+grep "^#define DRM_I915_[A-Z_0-9]\+[    ]\+0x" $header_dir/i915_drm.h | \
        sed -r 's/^#define +DRM_I915_([A-Z0-9_]+)[       ]+(0x[[:xdigit:]]+)/\t[DRM_COMMAND_BASE + \2] = "I915_\1",/g'
 printf "};\n"
index 40d063b8c0820075a3173277e724bf88e2297825..a3c304caa336572baaa59d48b684b7413918429a 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
 printf "static const char *kcmp_types[] = {\n"
 regex='^[[:space:]]+(KCMP_(\w+)),'
index bd28817afced04c6ebae4e47a6ced11dbc6e635a..c4699fd46bb64a3230232e452b366551a17c3609 100755 (executable)
@@ -1,10 +1,10 @@
 #!/bin/sh
 
-kvm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
 printf "static const char *kvm_ioctl_cmds[] = {\n"
 regex='^#[[:space:]]*define[[:space:]]+KVM_(\w+)[[:space:]]+_IO[RW]*\([[:space:]]*KVMIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${kvm_header_dir}/kvm.h   | \
+egrep $regex ${header_dir}/kvm.h       | \
        sed -r "s/$regex/\2 \1/g"       | \
        egrep -v " ((ARM|PPC|S390)_|[GS]ET_(DEBUGREGS|PIT2|XSAVE|TSC_KHZ)|CREATE_SPAPR_TCE_64)" | \
        sort | xargs printf "\t[%s] = \"%s\",\n"
index 60ef8640ee701c61ef232fd0ecc37f86cf006418..431639eb4d29a9c35011f6d11d20d46ec92d9b09 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
 
 printf "static const char *madvise_advices[] = {\n"
 regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
index faea4237c793d63ba3f518c2b179e0fded5295fb..6492c74df928df48bca7cf49214b272eefbe13ac 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
 printf "static const char *perf_ioctl_cmds[] = {\n"
 regex='^#[[:space:]]*define[[:space:]]+PERF_EVENT_IOC_(\w+)[[:space:]]+_IO[RW]*[[:space:]]*\([[:space:]]*.\$.[[:space:]]*,[[:space:]]*([[:digit:]]+).*'
index 62e51a02b8398b4c62a0180308646892001e7e63..e0a51aeb20b21a8cde5eee01336b2b11af4fef7f 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
 
 printf "static const char *pkey_alloc_access_rights[] = {\n"
 regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+PKEY_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*'
index aad5ab130539987db15336b233d50a3d03ef4082..eb511bb5fbd3211697aa1821c66dde2e78f5267e 100755 (executable)
@@ -1,8 +1,8 @@
 #!/bin/sh
 
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
 
 printf "static const char *sndrv_ctl_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $header_dir/asound.h | \
        sed -r 's/^#define +SNDRV_CTL_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.U., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
 printf "};\n"
index b7e9ef6b2f558feae0c1ec1fb6fe94055c4bf903..6818392968b24f130f82896a41f65b15471458f0 100755 (executable)
@@ -1,8 +1,8 @@
 #!/bin/sh
 
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
 
 printf "static const char *sndrv_pcm_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $header_dir/asound.h | \
        sed -r 's/^#define +SNDRV_PCM_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.A., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
 printf "};\n"
diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c
new file mode 100644 (file)
index 0000000..6522726
--- /dev/null
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace/beauty/socket.c
+ *
+ *  Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+#include <sys/types.h>
+#include <sys/socket.h>
+
+static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size)
+{
+#include "trace/beauty/generated/socket_ipproto_array.c"
+       static DEFINE_STRARRAY(socket_ipproto);
+
+       return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", protocol);
+}
+
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg)
+{
+       int domain = syscall_arg__val(arg, 0);
+
+       if (domain == AF_INET || domain == AF_INET6)
+               return socket__scnprintf_ipproto(arg->val, bf, size);
+
+       return syscall_arg__scnprintf_int(bf, size, arg);
+}
diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh
new file mode 100755 (executable)
index 0000000..a3cc246
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *socket_ipproto[] = {\n"
+regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*'
+
+egrep $regex ${header_dir}/in.h | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       sort | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
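
The generator's output is #include'd inside socket__scnprintf_ipproto() above, so the string table is rebuilt from the kernel headers at compile time. A guess at the shape of the generated socket_ipproto_array.c, using well-known IPPROTO_* values (the exact set depends on the in.h being scanned):

/* Illustrative generated table: index = protocol number, value = name */
static const char *socket_ipproto[] = {
	[0]   = "IP",
	[6]   = "TCP",
	[17]  = "UDP",
	[132] = "SCTP",
};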
index 76f1de6977870acf19cd533e518733e15d0c535f..0f6a5197d0bede8456bcf00d702ff4a51bf39b9d 100755 (executable)
@@ -1,17 +1,17 @@
 #!/bin/sh
 
-vhost_virtio_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
 printf "static const char *vhost_virtio_ioctl_cmds[] = {\n"
 regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
        sed -r "s/$regex/\2 \1/g"       | \
        sort | xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n"
 
 printf "static const char *vhost_virtio_ioctl_read_cmds[] = {\n"
 regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?R\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
        sed -r "s/$regex/\2 \1/g"       | \
        sort | xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n"
index b085f1b3e34dacdd4764d0704e0c65c6644debf1..4ab663ec3e5ea108ee7df9a189ecc2bc4e996843 100644 (file)
@@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
                        gtk_tree_store_set(store, &iter, col_idx++, s, -1);
                }
 
-               if (hists__has_callchains(hists) &&
+               if (hist_entry__has_callchains(h) &&
                    symbol_conf.use_callchain && hists__has(hists, sym)) {
                        if (callchain_param.mode == CHAIN_GRAPH_REL)
                                total = symbol_conf.cumulate_callchain ?
index 69b7a28f7a1c0300f8579fb62eb3fdde0c0da467..74c4ae1f0a054ee9dd619c936be4d2295627d19d 100644 (file)
@@ -529,7 +529,7 @@ out:
 
 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
                               char *bf, size_t bfsz, FILE *fp,
-                              bool use_callchain)
+                              bool ignore_callchains)
 {
        int ret;
        int callchain_ret = 0;
@@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
 
        ret = fprintf(fp, "%s\n", bf);
 
-       if (hist_entry__has_callchains(he) && use_callchain)
+       if (hist_entry__has_callchains(he) && !ignore_callchains)
                callchain_ret = hist_entry_callchain__fprintf(he, total_period,
                                                              0, fp);
 
@@ -755,7 +755,7 @@ int hists__fprintf_headers(struct hists *hists, FILE *fp)
 
 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                      int max_cols, float min_pcnt, FILE *fp,
-                     bool use_callchain)
+                     bool ignore_callchains)
 {
        struct rb_node *nd;
        size_t ret = 0;
@@ -799,7 +799,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                if (percent < min_pcnt)
                        continue;
 
-               ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
+               ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
 
                if (max_rows && ++nr_rows >= max_rows)
                        break;
index cee658733e2c560f0a1b07a39465b5d80c737f31..3d02ae38ec561279bfd7fb07a2314058ad9345ce 100644 (file)
@@ -747,7 +747,9 @@ int bpf__load(struct bpf_object *obj)
 
        err = bpf_object__load(obj);
        if (err) {
-               pr_debug("bpf: load objects failed\n");
+               char bf[128];
+               libbpf_strerror(err, bf, sizeof(bf));
+               pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
                return err;
        }
        return 0;
index bf31ceab33bd487d0021ccd9818384dde51fd371..89512504551b0b198a44ebe30e81d0972b86ce77 100644 (file)
@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
        raw_svector_ostream ostream(*Buffer);
 
        legacy::PassManager PM;
-       if (TargetMachine->addPassesToEmitFile(PM, ostream,
-                                              TargetMachine::CGFT_ObjectFile)) {
+       bool NotAdded;
+#if CLANG_VERSION_MAJOR < 7
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
+                                                     TargetMachine::CGFT_ObjectFile);
+#else
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
+                                                     TargetMachine::CGFT_ObjectFile);
+#endif
+       if (NotAdded) {
                llvm::errs() << "TargetMachine can't emit a file of this type\n";
                return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
        }
index 7798a2cc8a867420a468ffc530b444a1fd26f1fe..31279a7bd919616370d9bcececb34dc0efef271a 100644 (file)
@@ -20,9 +20,10 @@ static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,}
 
 static struct comm_str *comm_str__get(struct comm_str *cs)
 {
-       if (cs)
-               refcount_inc(&cs->refcnt);
-       return cs;
+       if (cs && refcount_inc_not_zero(&cs->refcnt))
+               return cs;
+
+       return NULL;
 }
 
 static void comm_str__put(struct comm_str *cs)
@@ -67,9 +68,14 @@ struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
                parent = *p;
                iter = rb_entry(parent, struct comm_str, rb_node);
 
+               /*
+                * If we race with comm_str__put, iter->refcnt is 0
+                * and it will be removed within comm_str__put call
+                * shortly, ignore it in this search.
+                */
                cmp = strcmp(str, iter->str);
-               if (!cmp)
-                       return comm_str__get(iter);
+               if (!cmp && comm_str__get(iter))
+                       return iter;
 
                if (cmp < 0)
                        p = &(*p)->rb_left;
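
The switch from an unconditional comm_str__get() to a conditional one closes a lookup-versus-free race: once comm_str__put() has dropped the last reference, the node is about to be erased from the rbtree, and grabbing it again would hand out a dangling pointer. A minimal sketch of the pattern, assuming the kernel's refcount API:

/* Take a reference only while the count is still non-zero; a lookup
 * racing with the final put simply skips the dying node instead of
 * resurrecting it. */
struct obj {
	refcount_t refcnt;	/* from the kernel's <linux/refcount.h> */
};

static struct obj *obj_get(struct obj *o)
{
	if (o && refcount_inc_not_zero(&o->refcnt))
		return o;
	return NULL;	/* already being torn down by the last put */
}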
index 4d5fc374e7302e30428266df5a1fb9da03304fd8..938def6d0bb9878bdf3b773d071e841a8ff0e595 100644 (file)
@@ -31,6 +31,8 @@
 #endif
 #endif
 
+#define CS_ETM_INVAL_ADDR      0xdeadbeefdeadbeefUL
+
 struct cs_etm_decoder {
        void *data;
        void (*packet_printer)(const char *msg);
@@ -261,8 +263,8 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
        decoder->tail = 0;
        decoder->packet_count = 0;
        for (i = 0; i < MAX_BUFFER; i++) {
-               decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
-               decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+               decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
+               decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
                decoder->packet_buffer[i].last_instr_taken_branch = false;
                decoder->packet_buffer[i].exc = false;
                decoder->packet_buffer[i].exc_ret = false;
@@ -295,8 +297,8 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
        decoder->packet_buffer[et].exc = false;
        decoder->packet_buffer[et].exc_ret = false;
        decoder->packet_buffer[et].cpu = *((int *)inode->priv);
-       decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL;
-       decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL;
+       decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
+       decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
 
        if (decoder->packet_count == MAX_BUFFER - 1)
                return OCSD_RESP_WAIT;
index 743f5f4443046b51da02a6fbefa3b9cf98f4e3d6..612b5755f742ffc1052e82df925143f4caf93a15 100644 (file)
@@ -23,6 +23,7 @@ struct cs_etm_buffer {
 };
 
 enum cs_etm_sample_type {
+       CS_ETM_EMPTY = 0,
        CS_ETM_RANGE = 1 << 0,
        CS_ETM_TRACE_ON = 1 << 1,
 };
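
Making CS_ETM_EMPTY the zero value means a freshly zeroed packet buffer is implicitly "empty", so the flush path added below can tell "nothing decoded yet" apart from a genuine range or trace-on packet without any extra initialisation. An illustrative check built on that property (assuming the header's struct cs_etm_packet):

/* A memset()-cleared packet reads as CS_ETM_EMPTY by construction. */
static inline bool cs_etm_packet__holds_data(const struct cs_etm_packet *packet)
{
	return packet->sample_type != CS_ETM_EMPTY;
}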
index 822ba915d144de828347936bb2c1514943266f36..2ae640257fdbbe897d8c398d1dfeeb406d1c9fb1 100644 (file)
@@ -494,6 +494,10 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
 
 static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
 {
+       /* Returns 0 for the CS_ETM_TRACE_ON packet */
+       if (packet->sample_type == CS_ETM_TRACE_ON)
+               return 0;
+
        /*
         * The packet records the execution range with an exclusive end address
         *
@@ -505,6 +509,15 @@ static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
        return packet->end_addr - A64_INSTR_SIZE;
 }
 
+static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
+{
+       /* Returns 0 for the CS_ETM_TRACE_ON packet */
+       if (packet->sample_type == CS_ETM_TRACE_ON)
+               return 0;
+
+       return packet->start_addr;
+}
+
 static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
 {
        /*
@@ -546,7 +559,7 @@ static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
 
        be       = &bs->entries[etmq->last_branch_pos];
        be->from = cs_etm__last_executed_instr(etmq->prev_packet);
-       be->to   = etmq->packet->start_addr;
+       be->to   = cs_etm__first_executed_instr(etmq->packet);
        /* No support for mispredict */
        be->flags.mispred = 0;
        be->flags.predicted = 1;
@@ -701,7 +714,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
        sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
        sample.pid = etmq->pid;
        sample.tid = etmq->tid;
-       sample.addr = etmq->packet->start_addr;
+       sample.addr = cs_etm__first_executed_instr(etmq->packet);
        sample.id = etmq->etm->branches_id;
        sample.stream_id = etmq->etm->branches_id;
        sample.period = 1;
@@ -897,13 +910,23 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
                etmq->period_instructions = instrs_over;
        }
 
-       if (etm->sample_branches &&
-           etmq->prev_packet &&
-           etmq->prev_packet->sample_type == CS_ETM_RANGE &&
-           etmq->prev_packet->last_instr_taken_branch) {
-               ret = cs_etm__synth_branch_sample(etmq);
-               if (ret)
-                       return ret;
+       if (etm->sample_branches && etmq->prev_packet) {
+               bool generate_sample = false;
+
+               /* Generate a sample for the trace-on packet */
+               if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
+                       generate_sample = true;
+
+               /* Generate a sample for a branch-taken packet */
+               if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+                   etmq->prev_packet->last_instr_taken_branch)
+                       generate_sample = true;
+
+               if (generate_sample) {
+                       ret = cs_etm__synth_branch_sample(etmq);
+                       if (ret)
+                               return ret;
+               }
        }
 
        if (etm->sample_branches || etm->synth_opts.last_branch) {
@@ -922,10 +945,17 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 static int cs_etm__flush(struct cs_etm_queue *etmq)
 {
        int err = 0;
+       struct cs_etm_auxtrace *etm = etmq->etm;
        struct cs_etm_packet *tmp;
 
+       if (!etmq->prev_packet)
+               return 0;
+
+       /* Handle the start-of-trace packet */
+       if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
+               goto swap_packet;
+
        if (etmq->etm->synth_opts.last_branch &&
-           etmq->prev_packet &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE) {
                /*
                 * Generate a last branch event for the branches left in the
@@ -939,8 +969,22 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
                err = cs_etm__synth_instruction_sample(
                        etmq, addr,
                        etmq->period_instructions);
+               if (err)
+                       return err;
+
                etmq->period_instructions = 0;
 
+       }
+
+       if (etm->sample_branches &&
+           etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+               err = cs_etm__synth_branch_sample(etmq);
+               if (err)
+                       return err;
+       }
+
+swap_packet:
+       if (etmq->etm->synth_opts.last_branch) {
                /*
                 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
                 * the next incoming packet.
@@ -1020,6 +1064,13 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
                                         */
                                        cs_etm__flush(etmq);
                                        break;
+                               case CS_ETM_EMPTY:
+                                       /*
+                                        * Should not receive an empty
+                                        * packet; report an error.
+                                        */
+                                       pr_err("CS ETM Trace: empty packet\n");
+                                       return -EINVAL;
                                default:
                                        break;
                                }
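
As a rough sketch of the decision logic this hunk centralizes (names shortened, scaffolding invented): a branch sample is synthesized either at a trace-on discontinuity or when the previous range packet ended in a taken branch.

    enum sample_type { EMPTY = 0, RANGE = 1 << 0, TRACE_ON = 1 << 1 };

    struct pkt {
            enum sample_type sample_type;
            int last_instr_taken_branch;
    };

    static int should_synth_branch(const struct pkt *prev)
    {
            if (!prev)
                    return 0;
            /* Trace-on packet: emit a sample so the discontinuity is visible. */
            if (prev->sample_type == TRACE_ON)
                    return 1;
            /* Range packet: emit only if it ended in a taken branch. */
            return prev->sample_type == RANGE && prev->last_instr_taken_branch;
    }
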
index 94fce4f537e91c70eef03f569d1c2e76ded0dc9d..ddf84b941abf894de7299ee5f63d4f3992d6a006 100644 (file)
@@ -260,6 +260,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
                evsel->attr.sample_period = 1;
        }
 
+       if (perf_evsel__is_clock(evsel)) {
+               /*
+                * The evsel->unit points to the static alias->unit,
+                * so it's OK to use a static string here.
+                */
+               static const char *unit = "msec";
+
+               evsel->unit = unit;
+               evsel->scale = 1e-6;
+       }
+
        return evsel;
 }
 
@@ -848,6 +859,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
        }
 }
 
+static bool is_dummy_event(struct perf_evsel *evsel)
+{
+       return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
+              (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -1086,6 +1103,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                else
                        perf_evsel__reset_sample_bit(evsel, PERIOD);
        }
+
+       /*
+        * For initial_delay, a dummy event is added implicitly.
+        * The software event will error out with -EOPNOTSUPP
+        * if the BRANCH_STACK bit is set.
+        */
+       if (opts->initial_delay && is_dummy_event(evsel))
+               perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
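
A self-contained version of the dummy-event test and the BRANCH_STACK fix-up above, written against the UAPI header; only the two checks mirror the patch, and the helper names are invented.

    #include <stdbool.h>
    #include <linux/perf_event.h>

    static bool attr_is_dummy(const struct perf_event_attr *attr)
    {
            return attr->type == PERF_TYPE_SOFTWARE &&
                   attr->config == PERF_COUNT_SW_DUMMY;
    }

    /*
     * With initial_delay a dummy tracking event is opened; a software event
     * cannot carry a branch stack, so clear the bit or perf_event_open()
     * fails with -EOPNOTSUPP.
     */
    static void fixup_dummy_for_delay(struct perf_event_attr *attr)
    {
            if (attr_is_dummy(attr))
                    attr->sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
    }
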
index d277930b19a1168ac6f0e76009d92592d2283d80..973c03167947579ff424d8e181cdbf42eaedec2d 100644 (file)
@@ -402,10 +402,13 @@ bool perf_evsel__is_function_event(struct perf_evsel *evsel);
 
 static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
 {
-       struct perf_event_attr *attr = &evsel->attr;
+       return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
+}
 
-       return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
-               (attr->type == PERF_TYPE_SOFTWARE);
+static inline bool perf_evsel__is_clock(struct perf_evsel *evsel)
+{
+       return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
+              perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
 }
 
 struct perf_attr_details {
index 540cd2dcd3e7098b7335c534a0aa7534ba87c889..5af58aac91ad2b186079914809934acecdf34578 100644 (file)
@@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        int cpu_nr = ff->ph->env.nr_cpus_avail;
        u64 size = 0;
        struct perf_header *ph = ff->ph;
+       bool do_core_id_test = true;
 
        ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
        if (!ph->env.cpu)
@@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                return 0;
        }
 
+       /* On s390 the socket_id number is not related to the number of CPUs.
+        * The socket_id number might be higher than the number of CPUs.
+        * This depends on the configuration.
+        */
+       if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+               do_core_id_test = false;
+
        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
@@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
 
-               if (nr != (u32)-1 && nr > (u32)cpu_nr) {
+               if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
                        pr_debug("socket_id number is too big."
                                 "You may need to upgrade the perf tool.\n");
                        goto free_cpu;
@@ -2579,7 +2587,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPR(NUMA_TOPOLOGY, numa_topology,  true),
        FEAT_OPN(BRANCH_STACK,  branch_stack,   false),
        FEAT_OPR(PMU_MAPPINGS,  pmu_mappings,   false),
-       FEAT_OPN(GROUP_DESC,    group_desc,     false),
+       FEAT_OPR(GROUP_DESC,    group_desc,     false),
        FEAT_OPN(AUXTRACE,      auxtrace,       false),
        FEAT_OPN(STAT,          stat,           false),
        FEAT_OPN(CACHE,         cache,          true),
@@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool,
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return 0;
        }
-       if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
+       if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return -1;
        }
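
The pipe-mode change is a plain off-by-one: HEADER_LAST_FEATURE is a one-past-the-end sentinel, so a valid feature index must be strictly below it. A generic sketch with hypothetical enum names:

    #include <stdbool.h>

    enum feat {
            FEAT_RESERVED,
            FEAT_TRACING_DATA,
            FEAT_BUILD_ID,
            FEAT_LAST,              /* sentinel: one past the last valid value */
    };

    static bool feat_valid(int feat)
    {
            /* 'feat > FEAT_LAST' would wrongly accept FEAT_LAST itself. */
            return feat != FEAT_RESERVED && feat < FEAT_LAST;
    }
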
index 90d4577a92dc60c7f89867e6ff49a0b7577269bf..6d7fe44aadc0da92a88aa76f464cccf8c340ef28 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef __PERF_HEADER_H
 #define __PERF_HEADER_H
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 #include <sys/types.h>
 #include <stdbool.h>
index 52e8fda93a4723f8b19b8fcaf7e6635aae505a6e..828cb9794c7668c9e48d3b9fa527cff394580923 100644 (file)
@@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists)
 
 static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
-                           bool sample_self)
+                           bool sample_self,
+                           size_t callchain_size)
 {
        *he = *template;
+       he->callchain_size = callchain_size;
 
        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 
        he = ops->new(callchain_size);
        if (he) {
-               err = hist_entry__init(he, template, sample_self);
+               err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
@@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
-       };
+       }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
-       return hists__findnew_entry(hists, &entry, al, sample_self);
+       if (!hists->has_callchains && he && he->callchain_size != 0)
+               hists->has_callchains = true;
+       return he;
 }
 
 struct hist_entry *hists__add_entry(struct hists *hists,
index 06607c434949da48b53099d6129a53e71037711a..3badd7f1e1b81aa24d204d755ef8dfb883076a66 100644 (file)
@@ -85,6 +85,7 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
+       bool                    has_callchains;
        int                     socket_filter;
        struct perf_hpp_list    *hpp_list;
        struct list_head        hpp_formats;
@@ -180,7 +181,7 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
 
 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                      int max_cols, float min_pcnt, FILE *fp,
-                     bool use_callchain);
+                     bool ignore_callchains);
 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
 
 void hists__filter_by_dso(struct hists *hists);
@@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
 
 static __pure inline bool hists__has_callchains(struct hists *hists)
 {
-       const struct perf_evsel *evsel = hists_to_evsel(hists);
-       return evsel__has_callchain(evsel);
+       return hists->has_callchains;
 }
 
 int hists__init(void);
index ba4c9dd186434a33c8c33a59ab8884fd7c679dd3..d426761a549d02d67756c541ea7ab0b2a0495e68 100644 (file)
@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
                if (len < offs)
                        return INTEL_PT_NEED_MORE_BYTES;
                byte = buf[offs++];
-               payload |= (byte >> 1) << shift;
+               payload |= ((uint64_t)byte >> 1) << shift;
        }
 
        packet->type = INTEL_PT_CYC;
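
The one-character intel-pt fix is worth spelling out: byte is promoted only to int, so (byte >> 1) << shift is evaluated in 32 bits and loses bits (or invokes undefined behavior) for shifts of 32 or more, even though the destination is 64-bit. A minimal reproduction:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char byte = 0xff;
            unsigned int shift = 32;
            uint64_t payload;

            /* Wrong: the shift happens in 32-bit int and overflows. */
            /* payload = (byte >> 1) << shift; */

            /* Right: widen to 64 bits before shifting. */
            payload = ((uint64_t)byte >> 1) << shift;
            printf("payload = %#llx\n", (unsigned long long)payload);
            return 0;
    }
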
index 976e658e38dce762163bb583f1ab02b39231a742..5e94857dfca2c8c47ae289b79dd2a717ae4a82b5 100644 (file)
@@ -266,16 +266,16 @@ static const char *kinc_fetch_script =
 "#!/usr/bin/env sh\n"
 "if ! test -d \"$KBUILD_DIR\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "TMPDIR=`mktemp -d`\n"
 "if test -z \"$TMPDIR\"\n"
 "then\n"
-"    exit -1\n"
+"    exit 1\n"
 "fi\n"
 "cat << EOF > $TMPDIR/Makefile\n"
 "obj-y := dummy.o\n"
index e7b4a8b513f2a5be5c3ea4ce7df70a8f626c3394..b300a3973448a91fd4286af12605fd653fc2a74a 100644 (file)
@@ -408,23 +408,16 @@ out_err:
 }
 
 /*
- * Caller must eventually drop thread->refcnt returned with a successful
- * lookup/new thread inserted.
+ * Front-end cache - TID lookups come in blocks,
+ * so most of the time we don't have to look up
+ * the full rbtree:
  */
-static struct thread *____machine__findnew_thread(struct machine *machine,
-                                                 struct threads *threads,
-                                                 pid_t pid, pid_t tid,
-                                                 bool create)
+static struct thread*
+__threads__get_last_match(struct threads *threads, struct machine *machine,
+                         int pid, int tid)
 {
-       struct rb_node **p = &threads->entries.rb_node;
-       struct rb_node *parent = NULL;
        struct thread *th;
 
-       /*
-        * Front-end cache - TID lookups come in blocks,
-        * so most of the time we dont have to look up
-        * the full rbtree:
-        */
        th = threads->last_match;
        if (th != NULL) {
                if (th->tid == tid) {
@@ -435,12 +428,57 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
                threads->last_match = NULL;
        }
 
+       return NULL;
+}
+
+static struct thread*
+threads__get_last_match(struct threads *threads, struct machine *machine,
+                       int pid, int tid)
+{
+       struct thread *th = NULL;
+
+       if (perf_singlethreaded)
+               th = __threads__get_last_match(threads, machine, pid, tid);
+
+       return th;
+}
+
+static void
+__threads__set_last_match(struct threads *threads, struct thread *th)
+{
+       threads->last_match = th;
+}
+
+static void
+threads__set_last_match(struct threads *threads, struct thread *th)
+{
+       if (perf_singlethreaded)
+               __threads__set_last_match(threads, th);
+}
+
+/*
+ * Caller must eventually drop thread->refcnt returned with a successful
+ * lookup/new thread inserted.
+ */
+static struct thread *____machine__findnew_thread(struct machine *machine,
+                                                 struct threads *threads,
+                                                 pid_t pid, pid_t tid,
+                                                 bool create)
+{
+       struct rb_node **p = &threads->entries.rb_node;
+       struct rb_node *parent = NULL;
+       struct thread *th;
+
+       th = threads__get_last_match(threads, machine, pid, tid);
+       if (th)
+               return th;
+
        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);
 
                if (th->tid == tid) {
-                       threads->last_match = th;
+                       threads__set_last_match(threads, th);
                        machine__update_thread_pid(machine, th, pid);
                        return thread__get(th);
                }
@@ -477,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
                 * It is now in the rbtree, get a ref
                 */
                thread__get(th);
-               threads->last_match = th;
+               threads__set_last_match(threads, th);
                ++threads->nr;
        }
 
@@ -1635,7 +1673,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
        struct threads *threads = machine__threads(machine, th->tid);
 
        if (threads->last_match == th)
-               threads->last_match = NULL;
+               threads__set_last_match(threads, NULL);
 
        BUG_ON(refcount_read(&th->refcnt) == 0);
        if (lock)
@@ -2272,6 +2310,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
 {
        struct callchain_cursor *cursor = arg;
        const char *srcline = NULL;
+       u64 addr;
 
        if (symbol_conf.hide_unresolved && entry->sym == NULL)
                return 0;
@@ -2279,7 +2318,13 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
                return 0;
 
-       srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
+       /*
+        * Convert entry->ip from a virtual address to an offset in
+        * its corresponding binary.
+        */
+       addr = map__map_ip(entry->map, entry->ip);
+
+       srcline = callchain_srcline(entry->map, entry->sym, addr);
        return callchain_cursor_append(cursor, entry->ip,
                                       entry->map, entry->sym,
                                       false, NULL, 0, 0, 0, srcline);
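
The refactor above gates the last_match cache on perf_singlethreaded, since an unsynchronized one-entry cache would race once lookups run from multiple threads. A schematic of the wrapper pattern, with invented types and the rbtree fallback elided:

    #include <stddef.h>

    extern int perf_singlethreaded;         /* mode flag, as in perf */

    struct thread { int tid; };
    struct threads { struct thread *last_match; };

    static struct thread *threads__cached(struct threads *t, int tid)
    {
            if (!perf_singlethreaded)       /* cache unusable when threaded */
                    return NULL;
            if (t->last_match && t->last_match->tid == tid)
                    return t->last_match;
            return NULL;                    /* caller falls back to the rbtree */
    }

    static void threads__remember(struct threads *t, struct thread *th)
    {
            if (perf_singlethreaded)
                    t->last_match = th;
    }
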
index 1ddc3d1d0147e7e86dccbf4d522076b602c0c157..a28f9b5cc4ffed9c2a4a1a131e2d258b693284da 100644 (file)
@@ -326,8 +326,8 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
                                if (raw)
                                        s = (char *)pe->metric_name;
                                else {
-                                       if (asprintf(&s, "%s\n\t[%s]",
-                                                    pe->metric_name, pe->desc) < 0)
+                                       if (asprintf(&s, "%s\n%*s%s]",
+                                                    pe->metric_name, 8, "[", pe->desc) < 0)
                                                return;
                                }
 
@@ -490,3 +490,25 @@ out:
        metricgroup__free_egroups(&group_list);
        return ret;
 }
+
+bool metricgroup__has_metric(const char *metric)
+{
+       struct pmu_events_map *map = perf_pmu__find_map(NULL);
+       struct pmu_event *pe;
+       int i;
+
+       if (!map)
+               return false;
+
+       for (i = 0; ; i++) {
+               pe = &map->table[i];
+
+               if (!pe->name && !pe->metric_group && !pe->metric_name)
+                       break;
+               if (!pe->metric_expr)
+                       continue;
+               if (match_metric(pe->metric_name, metric))
+                       return true;
+       }
+       return false;
+}
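
metricgroup__has_metric() walks a sentinel-terminated table: the generated pmu-events array ends with a row whose fields are all NULL. A reduced sketch of the same scan; the real code matches with match_metric() (glob-aware), while strcmp() is used here for brevity:

    #include <stdbool.h>
    #include <string.h>

    struct pmu_event {
            const char *name;
            const char *metric_name;
            const char *metric_expr;
    };

    static bool table_has_metric(const struct pmu_event *table,
                                 const char *metric)
    {
            for (int i = 0; ; i++) {
                    const struct pmu_event *pe = &table[i];

                    if (!pe->name && !pe->metric_name)
                            break;          /* all-NULL sentinel row */
                    if (!pe->metric_expr)
                            continue;       /* plain event, not a metric */
                    if (pe->metric_name && !strcmp(pe->metric_name, metric))
                            return true;
            }
            return false;
    }
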
index 06854e125ee71c9f7cae52dc2b81ae00329afdf6..8a155dba0581e5d3c7bad97a946c222fba898852 100644 (file)
@@ -28,4 +28,5 @@ int metricgroup__parse_groups(const struct option *opt,
                        struct rblist *metric_events);
 
 void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+bool metricgroup__has_metric(const char *metric);
 #endif
index 760558dcfd1810c432684b7a7cea1f0aacaba677..cae1a9a397222ca4b06a17f5bda0d4aa6774ce5d 100644 (file)
@@ -10,6 +10,7 @@
 #define __PERF_NAMESPACES_H
 
 #include <sys/types.h>
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 #include <linux/refcount.h>
 #include <linux/types.h>
index 155d2570274fdae6fbe7caea7bfa1e07953f7948..da8fe57691b8cd0d4c1c22d0cf0fa62595385ac5 100644 (file)
@@ -227,11 +227,16 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME opt_pmu_config
 {
+       struct parse_events_state *parse_state = _parse_state;
+       struct parse_events_error *error = parse_state->error;
        struct list_head *list, *orig_terms, *terms;
 
        if (parse_events_copy_term_list($2, &orig_terms))
                YYABORT;
 
+       if (error)
+               error->idx = @1.first_column;
+
        ALLOC_LIST(list);
        if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
                struct perf_pmu *pmu = NULL;
index d2fb597c9a8c78d8e8fd8a9890e67f8b8f4432d7..afd68524ffa983095e4ae17529d489d8ca075915 100644 (file)
@@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
        return 0;
 }
 
+static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
+                               char **new_str)
+{
+       if (!*old_str)
+               goto set_new;
+
+       if (*new_str) { /* Have new string, check with old */
+               if (strcasecmp(*old_str, *new_str))
+                       pr_debug("alias %s differs in field '%s'\n",
+                                name, field);
+               zfree(old_str);
+       } else          /* Nothing new --> keep old string */
+               return;
+set_new:
+       *old_str = *new_str;
+       *new_str = NULL;
+}
+
+static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+                                 struct perf_pmu_alias *newalias)
+{
+       perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
+       perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
+                           &newalias->long_desc);
+       perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
+       perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr,
+                           &newalias->metric_expr);
+       perf_pmu_assign_str(old->name, "metric_name", &old->metric_name,
+                           &newalias->metric_name);
+       perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
+       old->scale = newalias->scale;
+       old->per_pkg = newalias->per_pkg;
+       old->snapshot = newalias->snapshot;
+       memcpy(old->unit, newalias->unit, sizeof(old->unit));
+}
+
+/* Delete an alias entry. */
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+{
+       zfree(&newalias->name);
+       zfree(&newalias->desc);
+       zfree(&newalias->long_desc);
+       zfree(&newalias->topic);
+       zfree(&newalias->str);
+       zfree(&newalias->metric_expr);
+       zfree(&newalias->metric_name);
+       parse_events_terms__purge(&newalias->terms);
+       free(newalias);
+}
+
+/* Merge an alias: search the alias list. If this name is already
+ * present, merge both entries to combine all information.
+ */
+static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
+                                struct list_head *alist)
+{
+       struct perf_pmu_alias *a;
+
+       list_for_each_entry(a, alist, list) {
+               if (!strcasecmp(newalias->name, a->name)) {
+                       perf_pmu_update_alias(a, newalias);
+                       perf_pmu_free_alias(newalias);
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *desc, char *val,
                                 char *long_desc, char *topic,
@@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *metric_expr,
                                 char *metric_name)
 {
+       struct parse_events_term *term;
        struct perf_pmu_alias *alias;
        int ret;
        int num;
+       char newval[256];
 
        alias = malloc(sizeof(*alias));
        if (!alias)
@@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                return ret;
        }
 
+       /* Scan the event and remove leading zeroes, spaces and newlines;
+        * some platforms have terms specified as
+        * event=0x0091 (read from files ../<PMU>/events/<FILE>)
+        * and terms specified as event=0x91 (read from JSON files).
+        *
+        * Rebuild the string to make the alias->str member comparable.
+        */
+       memset(newval, 0, sizeof(newval));
+       ret = 0;
+       list_for_each_entry(term, &alias->terms, list) {
+               if (ret)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        ",");
+               if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%#x", term->config, term->val.num);
+               else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%s", term->config, term->val.str);
+       }
+
        alias->name = strdup(name);
        if (dir) {
                /*
@@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
        }
        alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
-       alias->str = strdup(val);
+       alias->str = strdup(newval);
 
-       list_add_tail(&alias->list, list);
+       if (!perf_pmu_merge_alias(alias, list))
+               list_add_tail(&alias->list, list);
 
        return 0;
 }
@@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
 
        buf[ret] = 0;
 
+       /* Remove trailing newline from sysfs file */
+       rtrim(buf);
+
        return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
                                     NULL, NULL, NULL);
 }
@@ -557,12 +652,6 @@ static int is_arm_pmu_core(const char *name)
        if (stat(path, &st) == 0)
                return 1;
 
-       /* Look for cpu sysfs (specific to s390) */
-       scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
-                 sysfs, name);
-       if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
-               return 1;
-
        return 0;
 }
 
index 46e9e19ab1ac43a9bee5349c8826d4c990c976ad..bc32e57d17be76bddbc561bcbafe3b06e5295461 100644 (file)
@@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
        if (_PyTuple_Resize(&t, n) == -1)
                Py_FatalError("error resizing Python tuple");
 
-       if (!dict) {
+       if (!dict)
                call_object(handler, t, handler_name);
-       } else {
+       else
                call_object(handler, t, default_handler_name);
-               Py_DECREF(dict);
-       }
 
-       Py_XDECREF(all_entries_dict);
        Py_DECREF(t);
 }
 
@@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample,
 
        call_object(handler, t, handler_name);
 
-       Py_DECREF(dict);
        Py_DECREF(t);
 }
 
@@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "# See the perf-script-python Documentation for the list "
                "of available functions.\n\n");
 
+       fprintf(ofp, "from __future__ import print_function\n\n");
        fprintf(ofp, "import os\n");
        fprintf(ofp, "import sys\n\n");
 
@@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "from Core import *\n\n\n");
 
        fprintf(ofp, "def trace_begin():\n");
-       fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_begin\")\n\n");
 
        fprintf(ofp, "def trace_end():\n");
-       fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
 
        while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "def %s__%s(", event->system, event->name);
@@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                        "common_secs, common_nsecs,\n\t\t\t"
                        "common_pid, common_comm)\n\n");
 
-               fprintf(ofp, "\t\tprint \"");
+               fprintf(ofp, "\t\tprint(\"");
 
                not_first = 0;
                count = 0;
@@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                                fprintf(ofp, "%s", f->name);
                }
 
-               fprintf(ofp, ")\n\n");
+               fprintf(ofp, "))\n\n");
 
-               fprintf(ofp, "\t\tprint 'Sample: {'+"
-                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+               fprintf(ofp, "\t\tprint('Sample: {'+"
+                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
                fprintf(ofp, "\t\tfor node in common_callchain:");
                fprintf(ofp, "\n\t\t\tif 'sym' in node:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))");
                fprintf(ofp, "\n\t\t\telse:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
-               fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
+               fprintf(ofp, "\t\tprint()\n\n");
 
        }
 
        fprintf(ofp, "def trace_unhandled(event_name, context, "
                "event_fields_dict, perf_sample_dict):\n");
 
-       fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n");
-       fprintf(ofp, "\t\tprint 'Sample: {'+"
-               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+       fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n");
+       fprintf(ofp, "\t\tprint('Sample: {'+"
+               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
        fprintf(ofp, "def print_header("
                "event_name, cpu, secs, nsecs, pid, comm):\n"
-               "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
-               "(event_name, cpu, secs, nsecs, pid, comm),\n\n");
+               "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+               "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n");
 
        fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n"
                "\treturn delimiter.join"
index 7cf2d5cc038ea07accaf5ef631c9b23b7b0c207b..8bf302cafcecd6b285d68e2b2c56130019dea101 100644 (file)
@@ -112,6 +112,8 @@ struct hist_entry {
 
        char                    level;
        u8                      filtered;
+
+       u16                     callchain_size;
        union {
                /*
                 * Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@ struct hist_entry {
 
 static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
 {
-       return hists__has_callchains(he->hists);
+       return he->callchain_size != 0;
 }
 
 static inline bool hist_entry__has_pairs(struct hist_entry *he)
index 594d14a02b67cb4cf49f18205511506a72c66765..99990f5f2512acbe59b0a51ab450fb92c0c6b446 100644 (file)
@@ -913,11 +913,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
                        ratio = total / avg;
 
                print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-       } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
-                  perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
+       } else if (perf_evsel__is_clock(evsel)) {
                if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
                        print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
-                                    avg / ratio);
+                                    avg / (ratio * evsel->scale));
                else
                        print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
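
A quick numeric check of the utilization fix, with made-up numbers: the clock events count nanoseconds, the new evsel->scale is 1e-6 (so values print in msec), and dividing by walltime times the same scale keeps "CPUs utilized" dimensionless.

    #include <stdio.h>

    int main(void)
    {
            double task_clock_ns = 4e9;     /* 4 s of CPU time measured */
            double walltime_ns   = 2e9;     /* 2 s of wall-clock time */
            double scale         = 1e-6;    /* ns -> msec, as on clock evsels */

            double avg = task_clock_ns * scale;             /* value in msec */
            double utilized = avg / (walltime_ns * scale);  /* rescale denom */

            printf("CPUs utilized: %.3f\n", utilized);      /* prints 2.000 */
            return 0;
    }
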
index 0ee7f568d60cced8ab428c3f73cae9d1e7b67620..3393d7ee940141c62158410ee90f4a3dee691b51 100644 (file)
@@ -38,6 +38,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_64;
 #include <asm/syscalls_32.c>
 const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
 static const char **syscalltbl_native = syscalltbl_powerpc_32;
+#elif defined(__aarch64__)
+#include <asm/syscalls.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_arm64;
 #endif
 
 struct syscall {
index 538db4e5d1e69c733edb0810d82d2b89206da2de..6f318b15950e8e539f60ac11b3b7606007b496c3 100644 (file)
@@ -77,7 +77,7 @@ static int entry(u64 ip, struct unwind_info *ui)
        if (__report_module(&al, ip, ui))
                return -1;
 
-       e->ip  = al.addr;
+       e->ip  = ip;
        e->map = al.map;
        e->sym = al.sym;
 
index 6a11bc7e6b27f68780b93aebcba5cae0ecdf18ec..79f521a552cf5156b97835728305e72c6cba26aa 100644 (file)
@@ -575,7 +575,7 @@ static int entry(u64 ip, struct thread *thread,
        struct addr_location al;
 
        e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
-       e.ip = al.addr;
+       e.ip  = ip;
        e.map = al.map;
 
        pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
index ca9ef70176249294644a1beeea2b141086cfe75c..a6db83a88e852c40024f42d46f5c55fa3dbfd446 100644 (file)
@@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary.  Note that option
 .PP
 \fB--hide column\fP do not show the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--hide sysfs" to hide the sysfs statistics columns as a group.
 .PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default.  Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled by default.  Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
 The column name "all" can be used to enable all disabled-by-default built-in counters.
 .PP
 \fB--show column\fP show only the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--show sysfs" to show the sysfs statistics columns as a group.
@@ -106,7 +106,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3....  The system summary is the average of all CPUs in the system.  Note that these are software, reflecting what was requested.  The hardware counters reflect what was actually achieved.
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
-\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
 \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
 \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
@@ -114,7 +114,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCorWatt\fP Watts consumed by the core part of the package.
 \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
 \fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
-\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.
+\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.  Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system.  Note that the meaning of this field is model specific.  For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits.  Comparing PkgWatt and PkgTmp to system limits is necessary.
 \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
 .fi
 .SH TOO MUCH INFORMATION EXAMPLE
index d6cff3070ebde60d2fa9a54deec6c147b6bda484..980bd9d20646bd1ef7d64f5a624c6f62fe8bacf4 100644 (file)
@@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window;       /* IA32_HWP_REQUEST[bits 41:32] */
 unsigned int has_hwp_epp;              /* IA32_HWP_REQUEST[bits 31:24] */
 unsigned int has_hwp_pkg;              /* IA32_HWP_REQUEST_PKG */
 unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@ struct thread_data {
        unsigned long long  irq_count;
        unsigned int smi_count;
        unsigned int cpu_id;
+       unsigned int apic_id;
+       unsigned int x2apic_id;
        unsigned int flags;
 #define CPU_IS_FIRST_THREAD_IN_CORE    0x2
 #define CPU_IS_FIRST_CORE_IN_PACKAGE   0x4
@@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 }
 
 /*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except
+ * 1. built-in only; the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, so that --show and --hide do not
+ *    match them inside longer column names.
  */
 struct msr_counter bic[] = {
        { 0x0, "usec" },
        { 0x0, "Time_Of_Day_Seconds" },
        { 0x0, "Package" },
+       { 0x0, "Node" },
        { 0x0, "Avg_MHz" },
+       { 0x0, "Busy%" },
        { 0x0, "Bzy_MHz" },
        { 0x0, "TSC_MHz" },
        { 0x0, "IRQ" },
        { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
-       { 0x0, "Busy%" },
+       { 0x0, "sysfs" },
        { 0x0, "CPU%c1" },
        { 0x0, "CPU%c3" },
        { 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@ struct msr_counter bic[] = {
        { 0x0, "Cor_J" },
        { 0x0, "GFX_J" },
        { 0x0, "RAM_J" },
-       { 0x0, "Core" },
-       { 0x0, "CPU" },
        { 0x0, "Mod%c6" },
-       { 0x0, "sysfs" },
        { 0x0, "Totl%C0" },
        { 0x0, "Any%C0" },
        { 0x0, "GFX%C0" },
        { 0x0, "CPUGFX%" },
-       { 0x0, "Node%" },
+       { 0x0, "Core" },
+       { 0x0, "CPU" },
+       { 0x0, "APIC" },
+       { 0x0, "X2APIC" },
 };
 
-
-
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define        BIC_USEC        (1ULL << 0)
 #define        BIC_TOD         (1ULL << 1)
 #define        BIC_Package     (1ULL << 2)
-#define        BIC_Avg_MHz     (1ULL << 3)
-#define        BIC_Bzy_MHz     (1ULL << 4)
-#define        BIC_TSC_MHz     (1ULL << 5)
-#define        BIC_IRQ         (1ULL << 6)
-#define        BIC_SMI         (1ULL << 7)
-#define        BIC_Busy        (1ULL << 8)
-#define        BIC_CPU_c1      (1ULL << 9)
-#define        BIC_CPU_c3      (1ULL << 10)
-#define        BIC_CPU_c6      (1ULL << 11)
-#define        BIC_CPU_c7      (1ULL << 12)
-#define        BIC_ThreadC     (1ULL << 13)
-#define        BIC_CoreTmp     (1ULL << 14)
-#define        BIC_CoreCnt     (1ULL << 15)
-#define        BIC_PkgTmp      (1ULL << 16)
-#define        BIC_GFX_rc6     (1ULL << 17)
-#define        BIC_GFXMHz      (1ULL << 18)
-#define        BIC_Pkgpc2      (1ULL << 19)
-#define        BIC_Pkgpc3      (1ULL << 20)
-#define        BIC_Pkgpc6      (1ULL << 21)
-#define        BIC_Pkgpc7      (1ULL << 22)
-#define        BIC_Pkgpc8      (1ULL << 23)
-#define        BIC_Pkgpc9      (1ULL << 24)
-#define        BIC_Pkgpc10     (1ULL << 25)
-#define BIC_CPU_LPI    (1ULL << 26)
-#define BIC_SYS_LPI    (1ULL << 27)
-#define        BIC_PkgWatt     (1ULL << 26)
-#define        BIC_CorWatt     (1ULL << 27)
-#define        BIC_GFXWatt     (1ULL << 28)
-#define        BIC_PkgCnt      (1ULL << 29)
-#define        BIC_RAMWatt     (1ULL << 30)
-#define        BIC_PKG__       (1ULL << 31)
-#define        BIC_RAM__       (1ULL << 32)
-#define        BIC_Pkg_J       (1ULL << 33)
-#define        BIC_Cor_J       (1ULL << 34)
-#define        BIC_GFX_J       (1ULL << 35)
-#define        BIC_RAM_J       (1ULL << 36)
-#define        BIC_Core        (1ULL << 37)
-#define        BIC_CPU         (1ULL << 38)
-#define        BIC_Mod_c6      (1ULL << 39)
-#define        BIC_sysfs       (1ULL << 40)
-#define        BIC_Totl_c0     (1ULL << 41)
-#define        BIC_Any_c0      (1ULL << 42)
-#define        BIC_GFX_c0      (1ULL << 43)
-#define        BIC_CPUGFX      (1ULL << 44)
-#define        BIC_Node        (1ULL << 45)
-
-#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD)
+#define        BIC_Node        (1ULL << 3)
+#define        BIC_Avg_MHz     (1ULL << 4)
+#define        BIC_Busy        (1ULL << 5)
+#define        BIC_Bzy_MHz     (1ULL << 6)
+#define        BIC_TSC_MHz     (1ULL << 7)
+#define        BIC_IRQ         (1ULL << 8)
+#define        BIC_SMI         (1ULL << 9)
+#define        BIC_sysfs       (1ULL << 10)
+#define        BIC_CPU_c1      (1ULL << 11)
+#define        BIC_CPU_c3      (1ULL << 12)
+#define        BIC_CPU_c6      (1ULL << 13)
+#define        BIC_CPU_c7      (1ULL << 14)
+#define        BIC_ThreadC     (1ULL << 15)
+#define        BIC_CoreTmp     (1ULL << 16)
+#define        BIC_CoreCnt     (1ULL << 17)
+#define        BIC_PkgTmp      (1ULL << 18)
+#define        BIC_GFX_rc6     (1ULL << 19)
+#define        BIC_GFXMHz      (1ULL << 20)
+#define        BIC_Pkgpc2      (1ULL << 21)
+#define        BIC_Pkgpc3      (1ULL << 22)
+#define        BIC_Pkgpc6      (1ULL << 23)
+#define        BIC_Pkgpc7      (1ULL << 24)
+#define        BIC_Pkgpc8      (1ULL << 25)
+#define        BIC_Pkgpc9      (1ULL << 26)
+#define        BIC_Pkgpc10     (1ULL << 27)
+#define BIC_CPU_LPI    (1ULL << 28)
+#define BIC_SYS_LPI    (1ULL << 29)
+#define        BIC_PkgWatt     (1ULL << 30)
+#define        BIC_CorWatt     (1ULL << 31)
+#define        BIC_GFXWatt     (1ULL << 32)
+#define        BIC_PkgCnt      (1ULL << 33)
+#define        BIC_RAMWatt     (1ULL << 34)
+#define        BIC_PKG__       (1ULL << 35)
+#define        BIC_RAM__       (1ULL << 36)
+#define        BIC_Pkg_J       (1ULL << 37)
+#define        BIC_Cor_J       (1ULL << 38)
+#define        BIC_GFX_J       (1ULL << 39)
+#define        BIC_RAM_J       (1ULL << 40)
+#define        BIC_Mod_c6      (1ULL << 41)
+#define        BIC_Totl_c0     (1ULL << 42)
+#define        BIC_Any_c0      (1ULL << 43)
+#define        BIC_GFX_c0      (1ULL << 44)
+#define        BIC_CPUGFX      (1ULL << 45)
+#define        BIC_Core        (1ULL << 46)
+#define        BIC_CPU         (1ULL << 47)
+#define        BIC_APIC        (1ULL << 48)
+#define        BIC_X2APIC      (1ULL << 49)
+
+#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
 
 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
@@ -517,17 +524,34 @@ void help(void)
        "when COMMAND completes.\n"
        "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
        "to print statistics, until interrupted.\n"
-       "--add          add a counter\n"
-       "               eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
-       "--cpu  cpu-set limit output to summary plus cpu-set:\n"
-       "               {core | package | j,k,l..m,n-p }\n"
-       "--quiet        skip decoding system configuration header\n"
-       "--interval sec.subsec  Override default 5-second measurement interval\n"
-       "--help         print this help message\n"
-       "--list         list column headers only\n"
-       "--num_iterations num   number of the measurement iterations\n"
-       "--out file     create or truncate \"file\" for all output\n"
-       "--version      print version information\n"
+       "  -a, --add    add a counter\n"
+       "                 eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+       "  -c, --cpu    cpu-set limit output to summary plus cpu-set:\n"
+       "                 {core | package | j,k,l..m,n-p }\n"
+       "  -d, --debug  displays usec, Time_Of_Day_Seconds and more debugging\n"
+       "  -D, --Dump   displays the raw counter values\n"
+       "  -e, --enable [all | column]\n"
+       "               shows all or the specified disabled column\n"
+       "  -H, --hide [column|column,column,...]\n"
+       "               hide the specified column(s)\n"
+       "  -i, --interval sec.subsec\n"
+       "               Override default 5-second measurement interval\n"
+       "  -J, --Joules displays energy in Joules instead of Watts\n"
+       "  -l, --list   list column headers only\n"
+       "  -n, --num_iterations num\n"
+       "               number of the measurement iterations\n"
+       "  -o, --out file\n"
+       "               create or truncate \"file\" for all output\n"
+       "  -q, --quiet  skip decoding system configuration header\n"
+       "  -s, --show [column|column,column,...]\n"
+       "               show only the specified column(s)\n"
+       "  -S, --Summary\n"
+       "               limits output to 1-line system summary per interval\n"
+       "  -T, --TCC temperature\n"
+       "               sets the Thermal Control Circuit temperature in\n"
+       "                 degrees Celsius\n"
+       "  -h, --help   print this help message\n"
+       "  -v, --version        print version information\n"
        "\n"
        "For more help, run \"man turbostat\"\n");
 }
@@ -601,6 +625,10 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU))
                outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_APIC))
+               outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_X2APIC))
+               outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Avg_MHz))
                outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
        } else {
                if (DO_BIC(BIC_Package)) {
                        if (p)
@@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
        }
 
        if (DO_BIC(BIC_Avg_MHz))
@@ -1127,9 +1163,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
        if (!printed || !summary_only)
                print_header("\t");
 
-       if (topo.num_cpus > 1)
-               format_counters(&average.threads, &average.cores,
-                       &average.packages);
+       format_counters(&average.threads, &average.cores, &average.packages);
 
        printed = 1;
 
@@ -1231,6 +1265,12 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        int i;
        struct msr_counter *mp;
 
+       /* we run cpuid just the first time; copy the results forward */
+       if (DO_BIC(BIC_APIC))
+               new->apic_id = old->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               new->x2apic_id = old->x2apic_id;
+
        /*
         * the timestamps from start of measurement interval are in "old"
         * the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1433,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        int i;
        struct msr_counter *mp;
 
+       /* copy the unchanging APIC IDs */
+       if (DO_BIC(BIC_APIC))
+               average.threads.apic_id = t->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               average.threads.x2apic_id = t->x2apic_id;
+
        /* remember first tv_begin */
        if (average.threads.tv_begin.tv_sec == 0)
                average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1665,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
        return 0;
 }
 
+void get_apic_id(struct thread_data *t)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+
+       eax = ebx = ecx = edx = 0;
+
+       if (!genuine_intel)
+               return;
+
+       __cpuid(0, max_level, ebx, ecx, edx);
+
+       __cpuid(1, eax, ebx, ecx, edx);
+       t->apic_id = (ebx >> 24) & 0xf;
+
+       if (max_level < 0xb)
+               return;
+
+       if (!DO_BIC(BIC_X2APIC))
+               return;
+
+       ecx = 0;
+       __cpuid(0xb, eax, ebx, ecx, edx);
+       t->x2apic_id = edx;
+
+       if (debug && (t->apic_id != t->x2apic_id))
+               fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
+
 /*
  * get_counters(...)
  * migrate to cpu
@@ -1632,7 +1706,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
-
        gettimeofday(&t->tv_begin, (struct timezone *)NULL);
 
        if (cpu_migrate(cpu)) {
@@ -1640,6 +1713,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
+       if (first_counter_read)
+               get_apic_id(t);
 retry:
        t->tsc = rdtsc();       /* we are running on local CPU of interest */
 
@@ -2396,49 +2471,43 @@ int get_core_id(int cpu)
 
 void set_node_data(void)
 {
-       char path[80];
-       FILE *filep;
-       int pkg, node, cpu;
-
-       struct pkg_node_info {
-               int count;
-               int min;
-       } *pni;
-
-       pni = calloc(topo.num_packages, sizeof(struct pkg_node_info));
-       if (!pni)
-               err(1, "calloc pkg_node_count");
-
-       for (pkg = 0; pkg < topo.num_packages; pkg++)
-               pni[pkg].min = topo.num_cpus;
-
-       for (node = 0; node <= topo.max_node_num; node++) {
-               /* find the "first" cpu in the node */
-               sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node);
-               filep = fopen(path, "r");
-               if (!filep)
-                       continue;
-               fscanf(filep, "%d", &cpu);
-               fclose(filep);
-
-               pkg = cpus[cpu].physical_package_id;
-               pni[pkg].count++;
-
-               if (node < pni[pkg].min)
-                       pni[pkg].min = node;
-       }
-
-       for (pkg = 0; pkg < topo.num_packages; pkg++)
-               if (pni[pkg].count > topo.nodes_per_pkg)
-                       topo.nodes_per_pkg = pni[0].count;
-
-       for (cpu = 0; cpu < topo.num_cpus; cpu++) {
-               pkg = cpus[cpu].physical_package_id;
-               node = cpus[cpu].physical_node_id;
-               cpus[cpu].logical_node_id = node - pni[pkg].min;
+       int pkg, node, lnode, cpu, cpux;
+       int cpu_count;
+
+       /* initialize logical_node_id */
+       for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
+               cpus[cpu].logical_node_id = -1;
+
+       cpu_count = 0;
+       for (pkg = 0; pkg < topo.num_packages; pkg++) {
+               lnode = 0;
+               for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
+                       if (cpus[cpu].physical_package_id != pkg)
+                               continue;
+                       /* find a cpu with an unset logical_node_id */
+                       if (cpus[cpu].logical_node_id != -1)
+                               continue;
+                       cpus[cpu].logical_node_id = lnode;
+                       node = cpus[cpu].physical_node_id;
+                       cpu_count++;
+                       /*
+                        * find all matching cpus on this pkg and set
+                        * the logical_node_id
+                        */
+                       for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
+                               if ((cpus[cpux].physical_package_id == pkg) &&
+                                  (cpus[cpux].physical_node_id == node)) {
+                                       cpus[cpux].logical_node_id = lnode;
+                                       cpu_count++;
+                               }
+                       }
+                       lnode++;
+                       if (lnode > topo.nodes_per_pkg)
+                               topo.nodes_per_pkg = lnode;
+               }
+               if (cpu_count >= topo.max_cpu_num)
+                       break;
        }
-       free(pni);
-
 }
 
 int get_physical_node_id(struct cpu_topology *thiscpu)
@@ -2879,6 +2948,7 @@ void do_sleep(void)
        }
 }
 
+
 void turbostat_loop()
 {
        int retval;
@@ -2892,6 +2962,7 @@ restart:
 
        snapshot_proc_sysfs_files();
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
@@ -4386,13 +4457,15 @@ void process_cpuid()
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
-       if (family == 6 || family == 0xf)
+       if (family == 0xf)
+               family += (fms >> 20) & 0xff;
+       if (family >= 6)
                model += ((fms >> 16) & 0xf) << 4;
 
        if (!quiet) {
                fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
                        max_level, family, model, stepping, family, model, stepping);
-               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
                        ecx & (1 << 0) ? "SSE3" : "-",
                        ecx & (1 << 3) ? "MONITOR" : "-",
                        ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4474,7 @@ void process_cpuid()
                        edx & (1 << 4) ? "TSC" : "-",
                        edx & (1 << 5) ? "MSR" : "-",
                        edx & (1 << 22) ? "ACPI-TM" : "-",
+                       edx & (1 << 28) ? "HT" : "-",
                        edx & (1 << 29) ? "TM" : "-");
        }
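
The corrected decode follows the documented CPUID.1:EAX layout: the extended-family field only augments a base family of 0xf, and the extended-model nibble is folded in for family 6 and above (the old code never widened family 0xf with the extended-family bits at all). A standalone decode of the same rule:

    #include <stdio.h>

    /* Standard CPUID leaf 1 EAX decode; 'fms' is the raw EAX value. */
    static void decode_fms(unsigned int fms,
                           unsigned int *family, unsigned int *model)
    {
            *family = (fms >> 8) & 0xf;
            *model = (fms >> 4) & 0xf;
            if (*family == 0xf)
                    *family += (fms >> 20) & 0xff;
            if (*family >= 6)
                    *model += ((fms >> 16) & 0xf) << 4;
    }

    int main(void)
    {
            unsigned int f, m;

            decode_fms(0x000906ea, &f, &m); /* e.g. a Coffee Lake EAX value */
            printf("family 0x%x model 0x%x\n", f, m);       /* 0x6 / 0x9e */
            return 0;
    }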
 
@@ -4652,7 +4726,6 @@ void process_cpuid()
        return;
 }
 
-
 /*
  * in /dev/cpu/ return success for names that are numbers
  * i.e. filter out ".", "..", "microcode".
@@ -4755,16 +4828,8 @@ void topology_probe()
                siblings = get_thread_siblings(&cpus[i]);
                if (siblings > max_siblings)
                        max_siblings = siblings;
-               if (cpus[i].thread_id != -1)
+               if (cpus[i].thread_id == 0)
                        topo.num_cores++;
-
-               if (debug > 1)
-                       fprintf(outf,
-                               "cpu %d pkg %d node %d core %d thread %d\n",
-                               i, cpus[i].physical_package_id,
-                               cpus[i].physical_node_id,
-                               cpus[i].physical_core_id,
-                               cpus[i].thread_id);
        }
 
        topo.cores_per_node = max_core_id + 1;
@@ -4790,6 +4855,20 @@ void topology_probe()
        topo.threads_per_core = max_siblings;
        if (debug > 1)
                fprintf(outf, "max_siblings %d\n", max_siblings);
+
+       if (debug < 1)
+               return;
+
+       for (i = 0; i <= topo.max_cpu_num; ++i) {
+               fprintf(outf,
+                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id,
+                       cpus[i].physical_node_id,
+                       cpus[i].logical_node_id,
+                       cpus[i].physical_core_id,
+                       cpus[i].thread_id);
+       }
+
 }
 
 void
@@ -4842,6 +4921,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
        struct core_data *c;
        struct pkg_data *p;
 
+
+       /* Workaround for systems where physical_node_id==-1
+        * and logical_node_id==(-1 - topo.num_cpus)
+        */
+       if (node_id < 0)
+               node_id = 0;
+
        t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
        c = GET_CORE(core_base, core_id, node_id, pkg_id);
        p = GET_PKG(pkg_base, pkg_id);
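
The node_id clamp is a small but load-bearing fix: the indexing macros compute offsets of the form base + node_id * stride, so a negative id would address memory before the arrays. Reduced to its essence (a sketch, not turbostat code):

    /*
     * A negative node id (NUMA-less systems report -1) would otherwise
     * be used as an array index by GET_THREAD()/GET_CORE(), so fold it
     * to node 0 first.  GET_THREAD() et al. are turbostat's own macros.
     */
    static inline int clamp_node_id(int node_id)
    {
            return node_id < 0 ? 0 : node_id;
    }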
@@ -4946,6 +5032,7 @@ int fork_it(char **argv)
 
        snapshot_proc_sysfs_files();
        status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (status)
                exit(status);
        /* clear affinity side-effect of get_counters() */
@@ -5009,7 +5096,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.06.01"
+       fprintf(outf, "turbostat version 18.07.27"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5381,7 +5468,7 @@ void cmdline(int argc, char **argv)
                        break;
                case 'e':
                        /* --enable specified counter */
-                       bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+                       bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
                        break;
                case 'd':
                        debug++;
@@ -5465,7 +5552,6 @@ void cmdline(int argc, char **argv)
 int main(int argc, char **argv)
 {
        outf = stderr;
-
        cmdline(argc, argv);
 
        if (!quiet)
index a8fb63edcf8948df54b6aaa2f225def65d5a705f..e2926f72a821471214817f7ddb1c253a93b1ee02 100644 (file)
@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
        pcap->header.length = sizeof(*pcap);
        pcap->highest_capability = 1;
-       pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
-               ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+       pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
        offset += pcap->header.length;
 
        if (t->setup_hotplug) {
index 7a6214e9ae58d4432668394bf4762a0e2cb5c669..a362e3d7abc633fd33db81aa3bd99da27b6edd4b 100644 (file)
@@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
 
 BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
 BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
-BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
 
 ifneq ($(BTF_LLC_PROBE),)
 ifneq ($(BTF_PAHOLE_PROBE),)
index f2f28b6c89151d91eb7819b5e0f835afd4c9944a..810de20e8e2636ca76c10e00df607d450aa9a672 100644 (file)
@@ -158,6 +158,15 @@ struct bpf_map_def {
        unsigned int numa_node;
 };
 
+#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)         \
+       struct ____btf_map_##name {                             \
+               type_key key;                                   \
+               type_val value;                                 \
+       };                                                      \
+       struct ____btf_map_##name                               \
+       __attribute__ ((section(".maps." #name), used))         \
+               ____btf_map_##name = { }
+
 static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
        (void *) BPF_FUNC_skb_load_bytes;
 static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
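
BPF_ANNOTATE_KV_PAIR() plants a zero-initialized dummy struct in a ".maps.<name>" section so the compiler emits BTF describing the map's key and value types; a loader can then tie those types to the map by section name. Its intended use matches the test_btf_haskv.c hunk further down:

    struct bpf_map_def SEC("maps") btf_map = {
            .type = BPF_MAP_TYPE_ARRAY,
            .key_size = sizeof(int),
            .value_size = sizeof(struct ipv_counts),
            .max_entries = 4,
    };

    /* emits struct ____btf_map_btf_map { int key; struct ipv_counts value; }
     * into section ".maps.btf_map", purely as a BTF type anchor
     */
    BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);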
index 1eefe211a4a88a3dfbac5be585932384061b9edd..b4994a94968bfd9d12965fd630cba7e99458a30a 100644 (file)
@@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
index 3619f30230880e96efa05ba75a60abef44a5437a..ffdd27737c9e72366221e46609b9a2b1a41bec32 100644 (file)
@@ -247,6 +247,34 @@ static struct btf_raw_test raw_tests[] = {
        .max_entries = 4,
 },
 
+{
+       .descr = "struct test #3 Invalid member offset",
+       .raw_types = {
+               /* int */                                       /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* int64 */                                     /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),
+
+               /* struct A { */                                /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16),
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),        /* int m;               */
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0),         /* int64 n; */
+               /* } */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0",
+       .str_sec_size = sizeof("\0A\0m\0n\0"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_test3_map",
+       .key_size = sizeof(int),
+       .value_size = 16,
+       .key_type_id = 1,
+       .value_type_id = 3,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid member bits_offset",
+},
+
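
The new negative test encodes a 16-byte struct whose second member claims bit offset 0 after the first member was already placed at bit 64; member offsets that run backwards are rejected at BTF load time, which is what .btf_load_err/.err_str assert. For contrast, a well-formed layout with non-decreasing offsets (standard LP64 ABI assumed):

    #include <stdint.h>
    #include <stddef.h>

    struct A {
            int32_t m;              /* bits 0..31 */
            int64_t n;              /* bits 64..127, after padding */
    };

    _Static_assert(offsetof(struct A, m) == 0, "m at bit 0");
    _Static_assert(offsetof(struct A, n) == 8, "n at bit 64");
    _Static_assert(sizeof(struct A) == 16, "16-byte struct");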
 /* Test member exceeds the size of struct.
  *
  * struct A {
@@ -479,7 +507,7 @@ static struct btf_raw_test raw_tests[] = {
        .key_size = sizeof(int),
        .value_size = sizeof(void *) * 4,
        .key_type_id = 1,
-       .value_type_id = 4,
+       .value_type_id = 5,
        .max_entries = 4,
 },
 
@@ -1264,6 +1292,88 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "type != 0",
 },
 
+{
+       .descr = "arraymap invalid btf key (a bit field)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* 32 bit int with 32 bit offset */     /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 2,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf key (!= 32 bits)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* 16 bit int with 0 bit offset */      /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 2,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf value (too small)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       /* btf_value_size < map->value_size */
+       .value_size = sizeof(__u64),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf value (too big)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       /* btf_value_size > map->value_size */
+       .value_size = sizeof(__u16),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
 }; /* struct btf_raw_test raw_tests[] */
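
Read together, the four new array-map cases pin down what the kernel accepts: a key that is a plain (non-bitfield) 32-bit integer at bit offset 0, and a value whose BTF size equals the map's value_size exactly. Condensed into a hedged predicate (a sketch of the constraints, not the kernel's actual array_map_check_btf()):

    #include <stdbool.h>
    #include <stdint.h>

    struct btf_int_desc {
            uint8_t bits_offset;    /* BTF_INT_OFFSET() */
            uint8_t nr_bits;        /* BTF_INT_BITS() */
    };

    static bool array_key_ok(const struct btf_int_desc *key)
    {
            /* rejects the bitfield (offset 32) and 16-bit cases above */
            return key->bits_offset == 0 && key->nr_bits == 32;
    }

    static bool array_value_ok(uint32_t btf_value_size, uint32_t map_value_size)
    {
            /* rejects both the "too small" and "too big" cases above */
            return btf_value_size == map_value_size;
    }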
 
 static const char *get_next_str(const char *start, const char *end)
@@ -2023,7 +2133,7 @@ static struct btf_raw_test pprint_test = {
                BTF_ENUM_ENC(NAME_TBD, 2),
                BTF_ENUM_ENC(NAME_TBD, 3),
                /* struct pprint_mapv */                /* [16] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28),
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
                BTF_MEMBER_ENC(NAME_TBD, 11, 0),        /* uint32_t ui32 */
                BTF_MEMBER_ENC(NAME_TBD, 10, 32),       /* uint16_t ui16 */
                BTF_MEMBER_ENC(NAME_TBD, 12, 64),       /* int32_t si32 */
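
The size field in a BTF struct encoding must equal the real sizeof() of the C struct it describes, tail padding included; the 28 -> 32 bump above keeps the pprint_mapv encoding in step with a layout that evidently grew or re-aligned in this series. For illustration (hypothetical struct, not pprint_mapv):

    #include <stdint.h>

    struct padded {
            uint32_t a;     /* bytes 0..3 */
            uint16_t b;     /* bytes 4..5, then 2 bytes padding */
            uint32_t c;     /* bytes 8..11 */
            uint16_t d;     /* bytes 12..13, then 2 bytes tail padding */
    };
    _Static_assert(sizeof(struct padded) == 16, "padded to 16, not 14");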
index 8c7ca096ecf2e76bf35c0de30133c66edd19339a..b21b876f475d8a972586b6181ef1328fc2bfd13a 100644 (file)
@@ -10,11 +10,6 @@ struct ipv_counts {
        unsigned int v6;
 };
 
-typedef int btf_map_key;
-typedef struct ipv_counts btf_map_value;
-btf_map_key dumm_key;
-btf_map_value dummy_value;
-
 struct bpf_map_def SEC("maps") btf_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(int),
@@ -22,6 +17,8 @@ struct bpf_map_def SEC("maps") btf_map = {
        .max_entries = 4,
 };
 
+BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
 struct dummy_tracepoint_args {
        unsigned long long pad;
        struct sock *sock;
index 35669ccd4d23b26c7505e8829bcf3876e3bcb3e1..9df0d2ac45f8453b9529c4ea90fb19dba3f86480 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 SRC_TREE=../../../../
 
 test_run()
index ce2e15e4f9760e205ed8e91ab5260a23172ab2e5..677686198df34d799e67c0eb15ab25f4b68eba4c 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 GREEN='\033[0;92m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color
index 1c77994b5e713dfe8aae357dd083c4713080f62a..785eabf2a593124afb6f862543dac2ad580c9287 100755 (executable)
 # A UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
 # datagram can be read on NS6 when binding to fb00::6.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
 
 cleanup()
@@ -106,14 +115,14 @@ ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o
 ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
 
 ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65
-ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF obj test_lwt_seg6local.o sec add_egr_x dev veth4
+ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
 
-ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF obj test_lwt_seg6local.o sec pop_egr dev veth6
+ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
 ip netns exec ns4 ip -6 addr add fc42::1 dev lo
 ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87
 
 ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
-ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF obj test_lwt_seg6local.o sec inspect_t dev veth8
+ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
 
 ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo
 ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo
index e78aad0a68bb9963368a5236377144c8e61cb230..be800d0e7a841abfbc60545cf63fe33219db0c35 100755 (executable)
@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):
 
 def bpftool_prog_list(expected=None, ns=""):
     _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    # Remove the base progs
+    for p in base_progs:
+        if p in progs:
+            progs.remove(p)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %
@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):
 
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    # Remove the base maps
+    for m in base_maps:
+        if m in maps:
+            maps.remove(m)
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %
@@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root")
 # Check tools
 ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
index 05c8cb71724ae8c1d8d7c3e3453bce9a83092b96..0c7d9e556b47d0a8129586a634a423e4ad1af067 100644 (file)
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
                while (s->bytes_recvd < total_bytes) {
                        if (txmsg_cork) {
                                timeout.tv_sec = 0;
-                               timeout.tv_usec = 1000;
+                               timeout.tv_usec = 300000;
                        } else {
                                timeout.tv_sec = 1;
                                timeout.tv_usec = 0;
@@ -1413,18 +1413,12 @@ out:
 
 int main(int argc, char **argv)
 {
-       struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        int iov_count = 1, length = 1024, rate = 1;
        struct sockmap_options options = {0};
        int opt, longindex, err, cg_fd = 0;
        char *bpf_file = BPF_SOCKMAP_FILENAME;
        int test = PING_PONG;
 
-       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-               perror("setrlimit(RLIMIT_MEMLOCK)");
-               return 1;
-       }
-
        if (argc < 2)
                return test_suite();
 
index aeb2901f21f4737558efbecec73974b17c610a38..546aee3e9fb457ae166c0fda8bc0c3b484f1a19b 100755 (executable)
@@ -608,28 +608,26 @@ setup_xfrm_tunnel()
 test_xfrm_tunnel()
 {
        config_device
-        #tcpdump -nei veth1 ip &
-       output=$(mktemp)
-       cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
-        setup_xfrm_tunnel
+       > /sys/kernel/debug/tracing/trace
+       setup_xfrm_tunnel
        tc qdisc add dev veth1 clsact
        tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
                sec xfrm_get_state
        ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
        sleep 1
-       grep "reqid 1" $output
+       grep "reqid 1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "spi 0x1" $output
+       grep "spi 0x1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "remote ip 0xac100164" $output
+       grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
        check_err $?
        cleanup
 
        if [ $ret -ne 0 ]; then
-                echo -e ${RED}"FAIL: xfrm tunnel"${NC}
-                return 1
-        fi
-        echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+               echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+               return 1
+       fi
+       echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
 }
 
 attach_bpf()
@@ -657,6 +655,10 @@ cleanup()
        ip link del ip6geneve11 2> /dev/null
        ip link del erspan11 2> /dev/null
        ip link del ip6erspan11 2> /dev/null
+       ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+       ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+       ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+       ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
 }
 
 cleanup_exit()
@@ -668,7 +670,7 @@ cleanup_exit()
 
 check()
 {
-       ip link help $1 2>&1 | grep -q "^Usage:"
+       ip link help 2>&1 | grep -q "\s$1\s"
        if [ $? -ne 0 ];then
                echo "SKIP $1: iproute2 not support"
        cleanup
index 2ecd27b670d77e29e817d607ac80fd100c683884..41106d9d5cc75570e60d841396541cb3ea8e7439 100644 (file)
@@ -4974,6 +4974,24 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_LWT_XMIT,
        },
+       {
+               "make headroom for LWT_XMIT",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       /* split for s390 to succeed */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_2, 42),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
        {
                "invalid access of tc_classid for LWT_IN",
                .insns = {
@@ -11986,6 +12004,46 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_XADD stores into R2 packet",
                .prog_type = BPF_PROG_TYPE_XDP,
        },
+       {
+               "xadd/w check whether src/dst got mangled, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
+       {
+               "xadd/w check whether src/dst got mangled, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
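
The two new verifier programs exercise BPF_STX_XADD as an atomic read-modify-write and assert that neither the source register nor the destination (frame pointer) is clobbered as a side effect; the /w variant repeats the check with 32-bit stores. Rough C rendering of one program (a sketch, not a literal translation; the frame-pointer check has no C analogue and is elided):

    #include <stdatomic.h>

    long xadd_check(void)
    {
            _Atomic long val = 1;   /* BPF_MOV64_IMM(R0, 1) + BPF_STX_MEM */
            long src_copy = 1;      /* R6, snapshot of src register R0 */

            atomic_fetch_add(&val, 1);      /* BPF_STX_XADD */
            atomic_fetch_add(&val, 1);      /* BPF_STX_XADD */

            if (src_copy != 1)      /* BPF_JMP_REG(BPF_JNE, R6, R0, ...) */
                    return 42;      /* src was mangled */
            return val;             /* expected retval: 3 */
    }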
        {
                "bpf_get_stack return R0 within range",
                .insns = {
@@ -12554,8 +12612,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        }
 
        if (fd_prog >= 0) {
+               __u8 tmp[TEST_DATA_LEN << 2];
+               __u32 size_tmp = sizeof(tmp);
+
                err = bpf_prog_test_run(fd_prog, 1, test->data,
-                                       sizeof(test->data), NULL, NULL,
+                                       sizeof(test->data), tmp, &size_tmp,
                                        &retval, NULL);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644 (file)
index 0000000..3b1f45e
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since the trace buffer is empty, the snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
index 128e548aa377d600f16fa0b9cdb4fbdf9c540cdc..1a0ac3a29ec5f8c9f0052e47e074f40089c634c5 100644 (file)
@@ -12,3 +12,4 @@ tcp_mmap
 udpgso
 udpgso_bench_rx
 udpgso_bench_tx
+tcp_inq
index 7ba089b33e8b8248ec08d8421a582be66c9f7e87..cd3a2f1545b54c23dab9b534bce9528c57b6c2ec 100644 (file)
@@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
old mode 100644 (file)
new mode 100755 (executable)
index 78245d6..0f45633
@@ -740,13 +740,6 @@ ipv6_rt_add()
        run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
        log_test $? 2 "Attempt to add duplicate route - reject route"
 
-       # iproute2 prepend only sets NLM_F_CREATE
-       # - adds a new route; does NOT convert existing route to ECMP
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2"
-       check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024"
-       log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)"
-
        # route append with same prefix adds a new route
        # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND
        add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
@@ -754,27 +747,6 @@ ipv6_rt_add()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Append nexthop to existing route - gw"
 
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1"
-       log_test $? 0 "Append nexthop to existing route - dev only"
-
-       # multipath route can not have a nexthop that is a reject route
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64"
-       log_test $? 2 "Append nexthop to existing route - reject route"
-
-       # reject route can not be converted to multipath route
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2"
-       log_test $? 2 "Append nexthop to existing reject route - gw"
-
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       log_test $? 2 "Append nexthop to existing reject route - dev only"
-
        # insert mpath directly
        add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        check_route6  "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
@@ -819,13 +791,6 @@ ipv6_rt_replace_single()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Single path with multipath"
 
-       # single path with reject
-       #
-       add_initial_route6 "nexthop via 2001:db8:101::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Single path with reject route"
-
        # single path with single path using MULTIPATH attribute
        #
        add_initial_route6 "via 2001:db8:101::2"
@@ -873,12 +838,6 @@ ipv6_rt_replace_mpath()
        check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
        log_test $? 0 "Multipath with single path via multipath attribute"
 
-       # multipath with reject
-       add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Multipath with reject route"
-
        # route replace fails - invalid nexthop 1
        add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
index 77f762780199ff1f69f9f6b3f18e72deddb69f5e..e8c5dff448eb0b8d995393004ed1c1e982712bc6 100644 (file)
@@ -402,7 +402,7 @@ int main(int argc, char *argv[])
                exit(1);
        }
 
-       fd = socket(AF_INET6, SOCK_STREAM, 0);
+       fd = socket(cfg_family, SOCK_STREAM, 0);
        if (fd == -1) {
                perror("socket");
                exit(1);
index 792fa4d0285e80e6cd36fdd83e3b5946b0538b3f..850767befa47a5fe7ca4bf4733fa670e55c6bf37 100755 (executable)
@@ -35,9 +35,6 @@ run_udp() {
 
        echo "udp gso"
        run_in_netns ${args} -S
-
-       echo "udp gso zerocopy"
-       run_in_netns ${args} -S -z
 }
 
 run_tcp() {
index 6ccb154cb4aa4f36184811d406ed9f4317647e4f..22f8df1ad7d484418235b6dadd290baca3bf3c6c 100755 (executable)
@@ -7,13 +7,16 @@
 #
 # Released under the terms of the GPL v2.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./common_tests
 
 if [ -e $REBOOT_FLAG  ]; then
     rm $REBOOT_FLAG
 else
     prlog "pstore_crash_test has not been executed yet. we skip further tests."
-    exit 0
+    exit $ksft_skip
 fi
 
 prlog -n "Mounting pstore filesystem ... "
index c15f270e121d61af0e80b19f8a69d47fe68e88de..65541c21a5444abbad5face676e223c0934de39e 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
 #
 # Create a .config file from the spec file.  Run from the kernel source tree.
 # Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
 
 c=$1
 buildloc=$2
+resdir=$3
 builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
 then
-       if echo $buildloc | grep -q '^O='
+       builddir=`echo $buildloc | sed -e 's/^O=//'`
+       if test ! -d $builddir
        then
-               builddir=`echo $buildloc | sed -e 's/^O=//'`
-               if test ! -d $builddir
-               then
-                       mkdir $builddir
-               fi
-       else
-               echo Bad build directory: \"$buildloc\"
-               exit 2
+               mkdir $builddir
        fi
+else
+       echo Bad build directory: \"$buildloc\"
+       exit 2
 fi
 
 sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
 grep '^grep' < $T/u.sh > $T/upd.sh
 echo "cat - $c" >> $T/upd.sh
 make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
 mv $builddir/.config $builddir/.config.sav
 sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
 cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh $builddir/.config $c
index 34d126734cde9f1dd3109077e5810d491aa88562..9115fcdb5617cdcb7f72dbcf23bdd523f32528e6 100755 (executable)
@@ -2,7 +2,7 @@
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
        exit 1
 fi
 builddir=${2}
+resdir=${3}
 
 T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_CONSOLE=y
 ___EOF___
 
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
 retval=$?
 if test $retval -gt 1
 then
        exit 2
 fi
 ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
 retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
 then
        echo Kernel build error
-       egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+       egrep "Stop|Error|error:|warning:" < $resdir/Make.out
        echo Run aborted.
        exit 3
 fi
index 477ecb1293ab273ba60094c911031603740c44c3..0fa8a61ccb7b254baa29ea8fdf30b0dd28da2246 100755 (executable)
@@ -70,4 +70,5 @@ else
        else
                print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
        fi
+       echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
 fi
index c27e97824163e3c48a911455ab295d0d6a4dc00d..c9bab57a77ebafe98d01809f19508c7d589ccf97 100755 (executable)
@@ -39,6 +39,7 @@ do
                        head -1 $resdir/log
                fi
                TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+               rm -f $i/console.log.*.diags
                kvm-recheck-${TORTURE_SUITE}.sh $i
                if test -f "$i/console.log"
                then
index c5b0f94341d9a686d16b9eb52a3ecc6e5d990fc0..f7247ee00514d97b368e91338721fd8090b8a98c 100755 (executable)
@@ -98,14 +98,15 @@ then
        ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
        # Arch-independent indicator
        touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
 then
        # Had to build a kernel for this test.
        QEMU="`identify_qemu $builddir/vmlinux`"
        BOOT_IMAGE="`identify_boot_image $QEMU`"
-       cp $builddir/Make*.out $resdir
        cp $builddir/vmlinux $resdir
        cp $builddir/.config $resdir
+       cp $builddir/Module.symvers $resdir > /dev/null || :
+       cp $builddir/System.map $resdir > /dev/null || :
        if test -n "$BOOT_IMAGE"
        then
                cp $builddir/$BOOT_IMAGE $resdir
index 56610dbbdf73ffc6e390015a538df89eb6096830..5a7a62d76a50b91234b82fb1455ffc0d66f637a6 100755 (executable)
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
        print "needqemurun="
        jn=1
        for (j = first; j < pastlast; j++) {
-               builddir=KVM "/b" jn
+               builddir=KVM "/b1"
                cpusr[jn] = cpus[j];
                if (cfrep[cf[j]] == "") {
                        cfr[jn] = cf[j];
index 17293436f5518008e2619c3dec09217eecd7f4d9..84933f6aed77818d7e368f171ad544141578493f 100755 (executable)
@@ -163,6 +163,13 @@ then
        print_warning Summary: $summary
        cat $T.diags >> $file.diags
 fi
+for i in $file.*.diags
+do
+       if test -f "$i"
+       then
+               cat $i >> $file.diags
+       fi
+done
 if ! test -s $file.diags
 then
        rm -f $file.diags
index 5d2cc0bd50a0963efb0348d264114f60b30d91a8..5c3213cc3ad707feabd47dd691a42cecd5003b4a 100644 (file)
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
 rcutree.gp_init_delay=3
 rcutree.gp_cleanup_delay=3
 rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644 (file)
index 883149b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
index 24ec910419576879ed48a75e448cebc57c13aece..7bab8246392bb21f982e3803f77860717513a084 100644 (file)
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
                echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+               echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
        fi
 }
 
index 6a9f602a8718691b086b08544e41aa0a17667e18..91cf50b2b0e596c6e1930dbcfe9f0dd9111ad014 100644 (file)
@@ -90,6 +90,30 @@ unsigned int yield_mod_cnt, nr_abort;
 #error "Unsupported architecture"
 #endif
 
+#elif defined(__s390__)
+
+#define RSEQ_INJECT_INPUT \
+       , [loop_cnt_1]"m"(loop_cnt[1]) \
+       , [loop_cnt_2]"m"(loop_cnt[2]) \
+       , [loop_cnt_3]"m"(loop_cnt[3]) \
+       , [loop_cnt_4]"m"(loop_cnt[4]) \
+       , [loop_cnt_5]"m"(loop_cnt[5]) \
+       , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "r12"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+       "l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+       "ltr %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG "\n\t" \
+       "je 333f\n\t" \
+       "222:\n\t" \
+       "ahi %%" INJECT_ASM_REG ", -1\n\t" \
+       "jnz 222b\n\t" \
+       "333:\n\t"
+
 #elif defined(__ARMEL__)
 
 #define RSEQ_INJECT_INPUT \
@@ -137,6 +161,30 @@ unsigned int yield_mod_cnt, nr_abort;
        "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
        "bne 222b\n\t" \
        "333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+       , [loop_cnt_1]"m"(loop_cnt[1]) \
+       , [loop_cnt_2]"m"(loop_cnt[2]) \
+       , [loop_cnt_3]"m"(loop_cnt[3]) \
+       , [loop_cnt_4]"m"(loop_cnt[4]) \
+       , [loop_cnt_5]"m"(loop_cnt[5]) \
+       , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+       "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+       "beqz " INJECT_ASM_REG ", 333f\n\t" \
+       "222:\n\t" \
+       "addiu " INJECT_ASM_REG ", -1\n\t" \
+       "bnez " INJECT_ASM_REG ", 222b\n\t" \
+       "333:\n\t"
+
 #else
 #error unsupported target
 #endif
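
Each architecture's RSEQ_INJECT_ASM(n) block (s390 and MIPS added above) implements the same thing: load loop_cnt[n], skip ahead if it is zero, otherwise spin the register down to widen the race window at injection point n. Written in C it would simply be the loop below, which is exactly why it must stay in asm -- the compiler would delete it:

    /* what every RSEQ_INJECT_ASM(n) expands to, semantically */
    static inline void rseq_inject_delay(unsigned int count)
    {
            while (count)           /* "je/beqz 333f" when zero */
                    count--;        /* "ahi/addiu -1; jnz/bnez 222b" */
    }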
index 3b055f9aeaab56bcbe91f9bc493ac2d08c527074..3cea19877227a03c4c501bffa6a687cbe32ad126 100644 (file)
@@ -57,6 +57,7 @@ do {                                                                  \
 #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,          \
                                abort_label, version, flags,            \
                                start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t"                                        \
                __rseq_str(table_label) ":\n\t"                         \
                ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
                ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644 (file)
index 0000000..7f48ecf
--- /dev/null
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG       0x53053053
+
+#define rseq_smp_mb()  __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p)                                       \
+__extension__ ({                                                       \
+       __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);                       \
+       rseq_smp_mb();                                                  \
+       ____p1;                                                         \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()     rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)                                   \
+do {                                                                   \
+       rseq_smp_mb();                                                  \
+       RSEQ_WRITE_ONCE(*p, v);                                         \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG                  ".dword"
+# define LONG_LA               "dla"
+# define LONG_L                        "ld"
+# define LONG_S                        "sd"
+# define LONG_ADDI             "daddiu"
+# define U32_U64_PAD(x)                x
+#elif _MIPS_SZLONG == 32
+# define LONG                  ".word"
+# define LONG_LA               "la"
+# define LONG_L                        "lw"
+# define LONG_S                        "sw"
+# define LONG_ADDI             "addiu"
+# ifdef __BIG_ENDIAN
+#  define U32_U64_PAD(x)       "0x0, " x
+# else
+#  define U32_U64_PAD(x)       x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags,        start_ip, \
+                               post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t" \
+               ".balign 32\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+               RSEQ_INJECT_ASM(1) \
+               LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+               LONG_S  " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+               __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+               RSEQ_INJECT_ASM(2) \
+               "lw  $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+               "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, version, flags, \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t" \
+               __rseq_str(table_label) ":\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+                             start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, 0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess()   __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
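
rseq_cmpeqv_storev() is the basic restartable compare-and-store: 0 means the store committed on the requested CPU, 1 means the value comparison failed, and -1 means the critical section aborted (migration, preemption, or signal), so the caller retries. A hedged caller sketch in the style of the other rseq selftests (rseq_cpu_start() assumed from rseq.h):

    static int percpu_inc(intptr_t *counters)
    {
            for (;;) {
                    int cpu = rseq_cpu_start();
                    intptr_t old = counters[cpu];
                    int ret = rseq_cmpeqv_storev(&counters[cpu],
                                                 old, old + 1, cpu);

                    if (ret == 0)
                            return cpu;     /* committed on this cpu */
                    /* ret > 0: value raced; ret < 0: aborted -- retry */
            }
    }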
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+               LONG_S " $4, %[load]\n\t"
+               LONG_ADDI " $4, %[voffp]\n\t"
+               LONG_L " $4, 0($4)\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expectnot]           "r" (expectnot),
+                 [voffp]               "Ir" (voffp),
+                 [load]                "m" (*load)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+               LONG_L " $4, %[v]\n\t"
+               LONG_ADDI " $4, %[count]\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(4)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [count]               "Ir" (count)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+                                        intptr_t *v2, intptr_t newv2,
+                                        intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* cmp2 input */
+                 [v2]                  "m" (*v2),
+                 [expect2]             "r" (expect2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2, error3
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("1st expected value comparison failed");
+error3:
+       rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       /*
+        * asm goto does not allow output operands, yet the copy loop
+        * advances src/dst/len: spill them to scratch here and restore
+        * them on every exit path.
+        */
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t"
+               "222:\n\t"
+               "lb   $4, 0(%[src])\n\t"
+               "sb   $4, 0(%[dst])\n\t"
+               LONG_ADDI " %[src], 1\n\t"
+               LONG_ADDI " %[dst], 1\n\t"
+               LONG_ADDI " %[len], -1\n\t"
+               "bnez %[len], 222b\n\t"
+               "333:\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+                                        void *dst, void *src, size_t len,
+                                        intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t"
+               "222:\n\t"
+               "lb   $4, 0(%[src])\n\t"
+               "sb   $4, 0(%[dst])\n\t"
+               LONG_ADDI " %[src], 1\n\t"
+               LONG_ADDI " %[dst], 1\n\t"
+               LONG_ADDI " %[len], -1\n\t"
+               "bnez %[len], 222b\n\t"
+               "333:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
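For orientation, here is a minimal usage sketch of the simplest primitive above, in the style of the selftests' basic_percpu_ops_test.c: a lock-free per-CPU counter increment. It is not part of the patch; rseq_cpu_start(), rseq_likely() and rseq_cmpeqv_storev() come from the selftests' rseq.h, the calling thread is assumed to have called rseq_register_current_thread(), and percpu_inc() with its fixed-size array is hypothetical.

#include <stdint.h>
#include "rseq.h"

static intptr_t counters[256];          /* arbitrarily sized: one slot per CPU */

static void percpu_inc(void)
{
        for (;;) {
                int cpu = rseq_cpu_start();
                intptr_t expect = counters[cpu];

                /* 0 = committed; 1 = *v changed; -1 = abort/migration. */
                if (rseq_likely(!rseq_cmpeqv_storev(&counters[cpu], expect,
                                                    expect + 1, cpu)))
                        return;
                /* Re-read the slot and retry on any failure. */
        }
}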
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
new file mode 100644 (file)
index 0000000..1069e85
--- /dev/null
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#define RSEQ_SIG       0x53053053
+
+#define rseq_smp_mb()  __asm__ __volatile__ ("bcr 15,0" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p)                                       \
+__extension__ ({                                                       \
+       __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);                       \
+       rseq_barrier();                                                 \
+       ____p1;                                                         \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()     rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)                                   \
+do {                                                                   \
+       rseq_barrier();                                                 \
+       RSEQ_WRITE_ONCE(*p, v);                                         \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#ifdef __s390x__
+
+#define LONG_L                 "lg"
+#define LONG_S                 "stg"
+#define LONG_LT_R              "ltgr"
+#define LONG_CMP               "cg"
+#define LONG_CMP_R             "cgr"
+#define LONG_ADDI              "aghi"
+#define LONG_ADD_R             "agr"
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags,                 \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t"                 \
+               ".balign 32\n\t"                                        \
+               __rseq_str(label) ":\n\t"                               \
+               ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+               ".popsection\n\t"
+
+#elif defined(__s390__)
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags,                 \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t"                 \
+               ".balign 32\n\t"                                        \
+               __rseq_str(label) ":\n\t"                               \
+               ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+               ".popsection\n\t"
+
+#define LONG_L                 "l"
+#define LONG_S                 "st"
+#define LONG_LT_R              "ltr"
+#define LONG_CMP               "c"
+#define LONG_CMP_R             "cr"
+#define LONG_ADDI              "ahi"
+#define LONG_ADD_R             "ar"
+
+#endif
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,              \
+                               (post_commit_ip - start_ip), abort_ip)
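As a reading aid (not part of the patch), the descriptor these macros emit into the __rseq_table section mirrors the UAPI struct rseq_cs from linux/rseq.h; the 64-bit layout is sketched below. The 31-bit variant above emits each 64-bit field as a zero high word followed by the 32-bit value, matching big-endian order.

#include <stdint.h>

struct rseq_cs {                        /* matches ".balign 32" above */
        uint32_t version;               /* .long 0x0 */
        uint32_t flags;                 /* .long 0x0 */
        uint64_t start_ip;              /* first insn of the critical section */
        uint64_t post_commit_offset;    /* commit ip - start_ip */
        uint64_t abort_ip;              /* where the kernel restarts the thread */
} __attribute__((aligned(32)));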
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs)               \
+               RSEQ_INJECT_ASM(1)                                      \
+               "larl %%r0, " __rseq_str(cs_label) "\n\t"               \
+               LONG_S " %%r0, %[" __rseq_str(rseq_cs) "]\n\t"          \
+               __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label)             \
+               RSEQ_INJECT_ASM(2)                                      \
+               "c %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+               "jnz " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label)            \
+               ".pushsection __rseq_failure, \"ax\"\n\t"               \
+               ".long " __rseq_str(RSEQ_SIG) "\n\t"                    \
+               __rseq_str(label) ":\n\t"                               \
+               teardown                                                \
+               "j %l[" __rseq_str(abort_label) "]\n\t"                 \
+               ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label)                \
+               ".pushsection __rseq_failure, \"ax\"\n\t"               \
+               __rseq_str(label) ":\n\t"                               \
+               teardown                                                \
+               "j %l[" __rseq_str(cmpfail_label) "]\n\t"               \
+               ".popsection\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[error2]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
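Stripped of the asm plumbing, the fast path above behaves like the C sketch below. The sketch is illustrative only: the point of rseq is that the kernel diverts the thread to abort_ip, and the function returns -1, if it is preempted, migrated or takes a signal anywhere between the rseq_cs store and the final committing store.

#include <stdint.h>
#include "rseq.h"

static int rseq_cmpeqv_storev_sketch(intptr_t *v, intptr_t expect,
                                     intptr_t newv, int cpu)
{
        if ((int32_t)__rseq_abi.cpu_id != cpu)
                return -1;              /* wrong CPU: abort path */
        if (*v != expect)
                return 1;               /* cmpfail */
        *v = newv;                      /* the single committing store */
        return 0;
}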
+
+/*
+ * Compare @v against @expectnot. When they do _not_ match, store the
+ * current value of @v into @load, then load the word at address
+ * (*@v + @voffp) and store it into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " %%r1, %[v]\n\t"
+               LONG_CMP_R " %%r1, %[expectnot]\n\t"
+               "je %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " %%r1, %[v]\n\t"
+               LONG_CMP_R " %%r1, %[expectnot]\n\t"
+               "je %l[error2]\n\t"
+#endif
+               LONG_S " %%r1, %[load]\n\t"
+               LONG_ADD_R " %%r1, %[voffp]\n\t"
+               LONG_L " %%r1, 0(%%r1)\n\t"
+               /* final store */
+               LONG_S " %%r1, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expectnot]           "r" (expectnot),
+                 [voffp]               "r" (voffp),
+                 [load]                "m" (*load)
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0", "r1"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
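This primitive backs per-CPU list pop, as the selftests' param_test.c uses it: when the head is non-NULL, the pointer found @voffp bytes into the head node becomes the new head, and the old head comes back through @load. A hedged sketch follows (node type and helper name are illustrative, not from the patch):

#include <stddef.h>
#include <stdint.h>
#include "rseq.h"

struct node {
        intptr_t data;
        struct node *next;
};

static struct node *percpu_list_pop(struct node **heads)
{
        for (;;) {
                struct node *head;
                int cpu = rseq_cpu_start();
                int ret;

                ret = rseq_cmpnev_storeoffp_load((intptr_t *)&heads[cpu],
                                                 (intptr_t)NULL,
                                                 offsetof(struct node, next),
                                                 (intptr_t *)&head, cpu);
                if (!ret)
                        return head;    /* popped the old head */
                if (ret > 0)
                        return NULL;    /* list was empty */
                /* ret < 0: aborted, possibly migrated; retry */
        }
}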
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+               LONG_L " %%r0, %[v]\n\t"
+               LONG_ADD_R " %%r0, %[count]\n\t"
+               /* final store */
+               LONG_S " %%r0, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(4)
+               RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [count]               "r" (count)
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0"
+                 RSEQ_INJECT_CLOBBER
+               : abort
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* s390 is TSO: a plain store already has release semantics. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+                                        intptr_t *v2, intptr_t newv2,
+                                        intptr_t newv, int cpu)
+{
+       return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+               LONG_CMP " %[expect2], %[v2]\n\t"
+               "jnz %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz %l[error2]\n\t"
+               LONG_CMP " %[expect2], %[v2]\n\t"
+               "jnz %l[error3]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* cmp2 input */
+                 [v2]                  "m" (*v2),
+                 [expect2]             "r" (expect2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2, error3
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("1st expected value comparison failed");
+error3:
+       rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       /*
+        * asm goto does not allow output operands, yet the copy loop
+        * advances src/dst/len: spill them to scratch here and restore
+        * them on every exit path.
+        */
+       uint64_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_CMP " %[expect], %[v]\n\t"
+               "jnz 7f\n\t"
+#endif
+               /* try memcpy */
+               LONG_LT_R " %[len], %[len]\n\t"
+               "jz 333f\n\t"
+               "222:\n\t"
+               "ic %%r0,0(%[src])\n\t"
+               "stc %%r0,0(%[dst])\n\t"
+               LONG_ADDI " %[src], 1\n\t"
+               LONG_ADDI " %[dst], 1\n\t"
+               LONG_ADDI " %[len], -1\n\t"
+               "jnz 222b\n\t"
+               "333:\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               RSEQ_ASM_DEFINE_ABORT(4,
+                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                       abort)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                       error2)
+#endif
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "memory", "cc", "r0"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       return 0;
+abort:
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* s390 is TSO: a plain store already has release semantics. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+                                        void *dst, void *src, size_t len,
+                                        intptr_t newv, int cpu)
+{
+       return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+                                           newv, cpu);
+}
+#endif /* !RSEQ_SKIP_FASTPATH */
index 0a808575cbc443489a5713639f7076eacedbdde4..ca332efe9713535c246fd1b6906643f57cec381b 100644 (file)
@@ -73,6 +73,10 @@ extern __thread volatile struct rseq __rseq_abi;
 #include <rseq-arm.h>
 #elif defined(__PPC__)
 #include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
+#elif defined(__s390__)
+#include <rseq-s390.h>
 #else
 #error unsupported target
 #endif
@@ -131,17 +135,27 @@ static inline uint32_t rseq_current_cpu(void)
        return cpu;
 }
 
+static inline void rseq_clear_rseq_cs(void)
+{
+#ifdef __LP64__
+       __rseq_abi.rseq_cs.ptr = 0;
+#else
+       __rseq_abi.rseq_cs.ptr.ptr32 = 0;
+#endif
+}
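The #ifdef tracks the UAPI change that declared the rseq_cs field as a union. Roughly, with the surrounding struct rseq fields included for context (little-endian 32-bit shown; the two words swap on big endian, and the exact spelling follows linux/rseq.h):

#include <stdint.h>

struct rseq_abi_sketch {
        uint32_t cpu_id_start;
        uint32_t cpu_id;
        union {
                uint64_t ptr64;
#ifdef __LP64__
                uint64_t ptr;
#else
                struct {
                        uint32_t ptr32;         /* word userspace updates */
                        uint32_t padding;       /* must stay zero */
                } ptr;
#endif
        } rseq_cs;
        uint32_t flags;
};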
+
 /*
- * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
- * at least once between their last rseq_finish*() and library unload of the
- * library defining the rseq critical section (struct rseq_cs). This also
- * applies to use of rseq in code generated by JIT: rseq_prepare_unload()
- * should be invoked at least once by each thread using rseq_finish*() before
- * reclaim of the memory holding the struct rseq_cs.
+ * rseq_prepare_unload() should be invoked by each thread executing a rseq
+ * critical section at least once between its last critical section and
+ * unload of the library defining the rseq critical section (struct
+ * rseq_cs). This also applies to rseq used from JIT-generated code:
+ * rseq_prepare_unload() should be invoked at least once by each thread
+ * executing a rseq critical section before the memory holding its
+ * struct rseq_cs is reclaimed.
  */
 static inline void rseq_prepare_unload(void)
 {
-       __rseq_abi.rseq_cs = 0;
+       rseq_clear_rseq_cs();
 }
 
 #endif  /* RSEQ_H_ */
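A hedged example of why rseq_prepare_unload() exists (not from the patch): if a thread ran critical sections defined in a dlopen()ed library, the kernel-visible rseq_cs pointer may still reference a descriptor inside that mapping and must be cleared before dlclose() can safely unmap it. The helper name below is hypothetical.

#include <dlfcn.h>
#include "rseq.h"

static void unload_rseq_library(void *handle)   /* handle from dlopen() */
{
        /*
         * Every thread that ran rseq critical sections from the library
         * must do this after its last critical section.
         */
        rseq_prepare_unload();
        dlclose(handle);        /* safe: no struct rseq_cs in the mapping is referenced */
}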
old mode 100644 (file)
new mode 100755 (executable)
index 2082eeffd779d586b558f45883c5dc1cc08a865b..a19531dba4dc311d00e33fcb637b91bced262bf2 100644 (file)
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
 SUBDIRS := drivers
 
 TEST_PROGS := run.sh
 
+
 .PHONY: all clean
 
 include ../lib.mk
@@ -18,10 +29,6 @@ all:
                fi \
        done
 
-override define RUN_TESTS
-       @cd $(OUTPUT); ./run.sh
-endef
-
 override define INSTALL_RULE
        mkdir -p $(INSTALL_PATH)
        install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@ override define INSTALL_RULE
        done;
 endef
 
-override define EMIT_TESTS
-       echo "./run.sh"
-endef
-
 override define CLEAN
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
@@ -44,3 +47,4 @@ override define CLEAN
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
        done
 endef
+endif
index 6264f40bbdbc9dedf9fd41519823fa31771d6cd0..deb0df4155659ec1f4b13a4d74c5128673690226 100644 (file)
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
 INCLUDEDIR := -I.
 CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
 
index 24cff498b31aa831b388e638929f29c36db07dbd..fc9f8cde7d4223c3fd564105942d4d648acb8627 100755 (executable)
@@ -2,6 +2,19 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs static keys kernel module tests
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+       echo "static_key: module test_static_key_base is not found [SKIP]"
+       exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+       echo "static_key: module test_static_keys is not found [SKIP]"
+       exit $ksft_skip
+fi
+
 if /sbin/modprobe -q test_static_key_base; then
        if /sbin/modprobe -q test_static_keys; then
                echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644 (file)
index 0000000..1ab7e81
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
index ec232c3cfcaac3b8f52936eabb908a0c316183f8..584eb8ea780a49220782d08e104756199fc19934 100755 (executable)
@@ -14,6 +14,9 @@
 
 # This performs a series of tests against the proc sysctl interface.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 TEST_NAME="sysctl"
 TEST_DRIVER="test_${TEST_NAME}"
 TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@ test_modprobe()
                echo "$0: $DIR not present" >&2
                echo "You must have the following enabled in your kernel:" >&2
                cat $TEST_DIR/config >&2
-               exit 1
+               exit $ksft_skip
        fi
 }
 
@@ -98,28 +101,30 @@ test_reqs()
        uid=$(id -u)
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 
        if ! which perl 2> /dev/null > /dev/null; then
                echo "$0: You need perl installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which getconf 2> /dev/null > /dev/null; then
                echo "$0: You need getconf installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which diff 2> /dev/null > /dev/null; then
                echo "$0: You need diff installed"
-               exit 1
+               exit $ksft_skip
        fi
 }
 
 function load_req_mod()
 {
-       trap "test_modprobe" EXIT
-
        if [ ! -d $DIR ]; then
+               if ! modprobe -q -n $TEST_DRIVER; then
+                       echo "$0: module $TEST_DRIVER not found [SKIP]"
+                       exit $ksft_skip
+               fi
                modprobe $TEST_DRIVER
                if [ $? -ne 0 ]; then
                        exit
@@ -765,6 +770,7 @@ function parse_args()
 test_reqs
 allow_user_defaults
 check_production_sysctl_writes_strict
+test_modprobe
 load_req_mod
 
 trap "test_finish" EXIT
index ca6cd146aafe15420cb345ef3ba934dd21e13220..dcf73c5dab6e12e9074d15efae1ba9c65a139d60 100644 (file)
@@ -134,6 +134,11 @@ int main(int argv, char **argc)
        printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
 
        if (llabs(eppm - ppm) > 1000) {
+               if (tx1.offset || tx2.offset ||
+                   tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+                       printf("        [SKIP]\n");
+                       return ksft_exit_skip("The clock was adjusted externally. Shut down NTPd or other time sync daemons\n");
+               }
                printf("        [FAILED]\n");
                return ksft_exit_fail();
        }
index d60506fc77f8bcba61f222db0b0df05a38e2e68b..f9b31a57439b759c1813ca94ac948a998e9dca51 100755 (executable)
@@ -2,6 +2,13 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs copy_to/from_user infrastructure using test_user_copy kernel module
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+       echo "user: module test_user_copy is not found [SKIP]"
+       exit $ksft_skip
+fi
 if /sbin/modprobe -q test_user_copy; then
        /sbin/modprobe -q -r test_user_copy
        echo "user_copy: ok"
index 1097f04e4d80e6cff93bb9e912c4c38894cd5959..bcec71250873108efdeae50eab0874b3924204c4 100644 (file)
@@ -16,6 +16,8 @@
 #include <unistd.h>
 #include <string.h>
 
+#include "../kselftest.h"
+
 #define MAP_SIZE 1048576
 
 struct map_list {
@@ -169,7 +171,7 @@ int main(int argc, char **argv)
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
-               return 0;
+               return KSFT_SKIP;
        }
 
        lim.rlim_cur = RLIM_INFINITY;
index 4997b9222cfa5055f9c07f4f1f0a1454bae89d6e..637b6d0ac0d0bf63d88ff5f5782a65453b486a7a 100644 (file)
@@ -9,6 +9,8 @@
 #include <stdbool.h>
 #include "mlock2.h"
 
+#include "../kselftest.h"
+
 struct vm_boundaries {
        unsigned long start;
        unsigned long end;
@@ -303,7 +305,7 @@ static int test_mlock_lock()
        if (mlock2_(map, 2 * page_size, 0)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(0)");
                goto unmap;
@@ -412,7 +414,7 @@ static int test_mlock_onfault()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -425,7 +427,7 @@ static int test_mlock_onfault()
        if (munlock(map, 2 * page_size)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("munlock()");
                goto unmap;
@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
        if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock(ONFAULT)\n");
                goto out;
index 22d56467383029b24b52b95e6ee09cc0bb6bf835..88cbe5575f0cf9e0d8f165ecbe27002a3c5ed8a1 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 #please run as root
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 mnt=./huge
 exitcode=0
 
@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
                echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
                if [ $? -ne 0 ]; then
                        echo "Please run this test as root"
-                       exit 1
+                       exit $ksft_skip
                fi
                while read name size unit; do
                        if [ "$name" = "HugePages_Free:" ]; then
index de2f9ec8a87fb342a7a595a13b009358d9eae000..7b8171e3128a8715a62a10e020c69ea3ca1c5321 100644 (file)
@@ -69,6 +69,8 @@
 #include <setjmp.h>
 #include <stdbool.h>
 
+#include "../kselftest.h"
+
 #ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
 int main(void)
 {
        printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
-       return 0;
+       return KSFT_SKIP;
 }
 
 #endif /* __NR_userfaultfd */
index 246145b84a127c341fd1fdc4fb41bcf6c7d51644..4d9dc3f2fd7048212181c51f03cef4d1650e07c9 100644 (file)
@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
         */
        for (int i = 0; i < NGREG; i++) {
                greg_t req = requested_regs[i], res = resulting_regs[i];
+
                if (i == REG_TRAPNO || i == REG_IP)
                        continue;       /* don't care */
-               if (i == REG_SP) {
-                       printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
-                              (unsigned long long)res);
 
+               if (i == REG_SP) {
                        /*
-                        * In many circumstances, the high 32 bits of rsp
-                        * are zeroed.  For example, we could be a real
-                        * 32-bit program, or we could hit any of a number
-                        * of poorly-documented IRET or segmented ESP
-                        * oddities.  If this happens, it's okay.
+                        * If we were using a 16-bit stack segment, then
+                        * the kernel is a bit stuck: IRET only restores
+                        * the low 16 bits of ESP/RSP if SS is 16-bit.
+                        * The kernel uses a hack to restore bits 31:16,
+                        * but that hack doesn't help with bits 63:32.
+                        * On Intel CPUs, bits 63:32 end up zeroed, and, on
+                        * AMD CPUs, they leak the high bits of the kernel
+                        * espfix64 stack pointer.  There's very little that
+                        * the kernel can do about it.
+                        *
+                        * Similarly, if we are returning to a 32-bit context,
+                        * the CPU will often lose the high 32 bits of RSP.
                         */
-                       if (res == (req & 0xFFFFFFFF))
-                               continue;  /* OK; not expected to work */
+
+                       if (res == req)
+                               continue;
+
+                       if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+                               printf("[NOTE]\tSP: %llx -> %llx\n",
+                                      (unsigned long long)req,
+                                      (unsigned long long)res);
+                               continue;
+                       }
+
+                       printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+                              (unsigned long long)requested_regs[i],
+                              (unsigned long long)resulting_regs[i]);
+                       nerrs++;
+                       continue;
                }
 
                bool ignore_reg = false;
@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
 #endif
 
                /* Sanity check on the kernel */
-               if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+               if (i == REG_CX && req != res) {
                        printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
-                              (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                        continue;
                }
 
-               if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
-                       /*
-                        * SP is particularly interesting here.  The
-                        * usual cause of failures is that we hit the
-                        * nasty IRET case of returning to a 16-bit SS,
-                        * in which case bits 16:31 of the *kernel*
-                        * stack pointer persist in ESP.
-                        */
+               if (req != res && !ignore_reg) {
                        printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
-                              i, (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              i, (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                }
        }
index 754de7da426a80a2ae386042d30a5904b44446e6..232e958ec454756501f2caa8eaf2133067fe10ac 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 TCID="zram.sh"
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./zram_lib.sh
 
 run_zram () {
@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
 else
        echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
        echo "$TCID : CONFIG_ZRAM is not set"
-       exit 1
+       exit $ksft_skip
 fi
index f6a9c73e7a442e7988b0820ebc809a342981df91..9e73a4fb9b0aa9b2a2e81368badfbe278876695d 100755 (executable)
@@ -18,6 +18,9 @@ MODULE=0
 dev_makeswap=-1
 dev_mounted=-1
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 trap INT
 
 check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
 
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 }
 
index 95dd14648ba51cf9fcc7f86a38e81e8f47aa9f61..0f395dfb7774c3f87fffc337c89afbd8b5f247dd 100644 (file)
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x)  htole16(x)
-#define cpu_to_le32(x)  htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed, so define them as constant expressions
+ * that remain valid in static initializers.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x)  (x)
+#define cpu_to_le32(x)  (x)
+#else
+#define cpu_to_le16(x)  ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x)  \
+       ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >>  8) | \
+       (((x) & 0x0000ff00u) <<  8) | (((x) & 0x000000ffu) << 24))
+#endif
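The motivation, as a hedged example (descriptor type hypothetical): ffs-test builds its USB descriptors in static initializers, where the previous htole16()-based definition could be rejected as a non-constant expression; the macro form is a compile-time constant.

#include <stdint.h>

struct ep_desc {
        uint16_t wMaxPacketSize;        /* little-endian on the wire */
};

static struct ep_desc bulk_ep = {
        .wMaxPacketSize = cpu_to_le16(512),     /* OK: constant expression */
};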
+
 #define le32_to_cpu(x)  le32toh(x)
 #define le16_to_cpu(x)  le16toh(x)
 
-
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
index 0ac3caf90877fefd73384c78470359b110e7c053..d0351f83aebed9f1ce87c390e586ce45c7068337 100644 (file)
@@ -13,8 +13,8 @@
 } while (0);
 /* Weak barriers should be used. If not - it's a bug */
 # define mb() abort()
-# define rmb() abort()
-# define wmb() abort()
+# define dma_rmb() abort()
+# define dma_wmb() abort()
 #else
 #error Please fill in barrier macros
 #endif
index fca8381bbe041315d955f96e7e045d70d1cc89fb..fb22bccfbc8a7f2df651474211b48aa67efdf8f9 100644 (file)
@@ -52,6 +52,11 @@ static inline void *kmalloc(size_t s, gfp_t gfp)
                return __kmalloc_fake;
        return malloc(s);
 }
+/* Userspace test stub: unlike the kernel's kmalloc_array(), this does
+ * not check n * s for multiplication overflow.
+ */
+static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
+{
+       return kmalloc(n * s, gfp);
+}
+
 static inline void *kzalloc(size_t s, gfp_t gfp)
 {
        void *p = kmalloc(s, gfp);
index 9a45f90e2d08974c42c6e6dc242b5cfd35d5e120..369ee308b6686ca4a106581b91f8d382e45c79e8 100644 (file)
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & 0x03);
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~0x02;
 }
 
 static inline struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
        memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-       {
-               unsigned int i;
-               for (i = 0; i < nents; i++)
-                       sgl[i].sg_magic = SG_MAGIC;
-       }
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 72143cfaf6ec39404dad5f72a8cf08c5e5fefc7e..ea434ddc849925c6e2577a9ed6acea906ea8eafd 100644 (file)
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
index 04e554cae3a2066e5eb6e4d2544efc84a62d88de..108250e4d37640846c36c991420eafe3d4cbb18a 100644 (file)
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu->arch.pause = false;
-               swake_up(kvm_arch_vcpu_wq(vcpu));
+               swake_up_one(kvm_arch_vcpu_wq(vcpu));
        }
 }
 
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 {
        struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-       swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+       swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
                                       (!vcpu->arch.pause)));
 
        if (vcpu->arch.power_off || vcpu->arch.pause) {
index 8d90de213ce9b89340b7dc11927862f8344829c7..1d90d79706bd5b71d3914ecd808d2bd6c127286c 100644 (file)
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
        phys_addr_t next;
 
        assert_spin_locked(&kvm->mmu_lock);
+       WARN_ON(size & ~PAGE_MASK);
+
        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
        do {
                /*
index c95ab4c5a47516067737b1612d7545fa9543b3b1..9b73d3ad918a4520a3a7ecf6897836abc0887f9c 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
        smp_mb();               /* Make sure the above is visible */
 
        wq = kvm_arch_vcpu_wq(vcpu);
-       swake_up(wq);
+       swake_up_one(wq);
 
        return PSCI_RET_SUCCESS;
 }
index ff7dc890941a8447d6e5abeae6dfe6544fac18d7..cdce653e3c47fb31b9eb0ccf73c3bebd830d8496 100644 (file)
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
-       } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-               pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-                       (unsigned long long)resource_size(&info->vcpu),
-                       PAGE_SIZE);
-               kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
index 57bcb27dcf30f61e14361c675617f08a0e995600..23c2519c5b32a40a2513c7c974ec44ed7199159e 100644 (file)
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
        trace_kvm_async_pf_completed(addr, gva);
 
        if (swq_has_sleeper(&vcpu->wq))
-               swake_up(&vcpu->wq);
+               swake_up_one(&vcpu->wq);
 
        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
index 90d30fbe95aefb1e1a943d5bf29d7aee763fb9d0..b20b751286fc612214c59c95e787c9fb0fac50b7 100644 (file)
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
 {
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
+       struct kvm *kvm = irqfd->kvm;
        u64 cnt;
 
+       /* Make sure irqfd has been initialized in the assign path. */
+       synchronize_srcu(&kvm->irq_srcu);
+
        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 
        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
-       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        if (events & EPOLLIN)
                schedule_work(&irqfd->inject);
 
-       /*
-        * do not drop the file until the irqfd is fully initialized, otherwise
-        * we might race against the EPOLLHUP
-        */
-       fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        }
 #endif
 
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+
+       /*
+        * do not drop the file until the irqfd is fully initialized, otherwise
+        * we might race against the EPOLLHUP
+        */
+       fdput(f);
        return 0;
 
 fail:
index ada21f47f22b5a902e81572ba94efb16a2a7bccb..3d233ebfbee9955d0cd49f4f23ba1d6423a1b3b0 100644 (file)
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
+#define KVM_COMPAT(c)  .compat_ioctl   = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+                               unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -2167,7 +2172,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        kvm_arch_vcpu_blocking(vcpu);
 
        for (;;) {
-               prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+               prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
                if (kvm_vcpu_check_block(vcpu) < 0)
                        break;
@@ -2209,7 +2214,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 
        wqp = kvm_arch_vcpu_wq(vcpu);
        if (swq_has_sleeper(wqp)) {
-               swake_up(wqp);
+               swake_up_one(wqp);
                ++vcpu->stat.halt_wakeup;
                return true;
        }
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
        .mmap           = kvm_vcpu_mmap,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
        .unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl = kvm_device_ioctl,
-#endif
        .release = kvm_device_release,
+       KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ out:
 
 static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
-       .compat_ioctl   = kvm_dev_ioctl,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {